author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 16:40:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 16:40:27 -0700
commit     8d65b08debc7e62b2c6032d7fe7389d895b92cbc (patch)
tree       0c3141b60c3a03cc32742b5750c5e763b9dae489 /drivers/net/ethernet
parent     Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6 (diff)
parent     Merge branch 'tipc-refactor-socket-receive-functions' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here are some highlights from the 2065 networking commits that
  happened this development cycle:

   1) XDP support for IXGBE (John Fastabend) and thunderx (Sunil Kowuri)

   2) Add a generic XDP driver, so that anyone can test XDP even if they
      lack a networking device whose driver has explicit XDP support (me)

   3) Sparc64 now has an eBPF JIT too (me)

   4) Add a BPF program testing framework via BPF_PROG_TEST_RUN (Alexei
      Starovoitov)

   5) Make netfilter network namespace teardown less expensive (Florian
      Westphal)

   6) Add symmetric hashing support to nft_hash (Laura Garcia Liebana)

   7) Implement NAPI and GRO in netvsc driver (Stephen Hemminger)

   8) Support TC flower offload statistics in mlxsw (Arkadi Sharshevsky)

   9) Multiqueue support in stmmac driver (Joao Pinto)

  10) Remove TCP timewait recycling, it never really could possibly work
      well in the real world and timestamp randomization really zaps any
      hint of usability this feature had (Soheil Hassas Yeganeh)

  11) Support level3 vs level4 ECMP route hashing in ipv4 (Nikolay
      Aleksandrov)

  12) Add socket busy poll support to epoll (Sridhar Samudrala)

  13) Netlink extended ACK support (Johannes Berg, Pablo Neira Ayuso,
      and several others)

  14) IPSEC hw offload infrastructure (Steffen Klassert)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2065 commits)
  tipc: refactor function tipc_sk_recv_stream()
  tipc: refactor function tipc_sk_recvmsg()
  net: thunderx: Optimize page recycling for XDP
  net: thunderx: Support for XDP header adjustment
  net: thunderx: Add support for XDP_TX
  net: thunderx: Add support for XDP_DROP
  net: thunderx: Add basic XDP support
  net: thunderx: Cleanup receive buffer allocation
  net: thunderx: Optimize CQE_TX handling
  net: thunderx: Optimize RBDR descriptor handling
  net: thunderx: Support for page recycling
  ipx: call ipxitf_put() in ioctl error path
  net: sched: add helpers to handle extended actions
  qed*: Fix issues in the ptp filter config implementation.
  qede: Fix concurrency issue in PTP Tx path processing.
  stmmac: Add support for SIMATIC IOT2000 platform
  net: hns: fix ethtool_get_strings overflow in hns driver
  tcp: fix wraparound issue in tcp_lp
  bpf, arm64: fix jit branch offset related to ldimm64
  bpf, arm64: implement jiting of BPF_XADD
  ...
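Item 2 above (generic XDP) runs BPF programs at the XDP hook inside the core stack, so no driver support is required. A minimal sketch of such a program, assuming the usual clang/LLVM BPF target; this code is illustrative and not part of the merge:

/* xdp_pass.c -- minimal XDP program; passes every frame to the stack. */
#include <linux/bpf.h>

#ifndef __section
#define __section(NAME) __attribute__((section(NAME), used))
#endif

__section("xdp")
int xdp_pass_prog(struct xdp_md *ctx)
{
	return XDP_PASS;	/* no rewrite, no drop -- normal receive path */
}

char _license[] __section("license") = "GPL";

Attaching it generically with iproute2 would look like "ip link set dev eth0 xdpgeneric obj xdp_pass.o sec xdp" (the xdpgeneric keyword is an assumption about the iproute2 version at hand).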
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 7
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.h | 7
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 10
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 55
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 49
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1
-rw-r--r-- drivers/net/ethernet/apm/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/apm/Makefile | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/Makefile | 6
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/enet.c | 83
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/enet.h | 45
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ethtool.h | 78
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mac.c | 116
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mac.h | 107
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 759
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.h | 80
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mdio.c | 168
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ring.c | 81
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ring.h | 119
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 3
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 74
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 5
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 6
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 128
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 82
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 103
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 34
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 109
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 228
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 108
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 427
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 325
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 292
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 16
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 7
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 58
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_core.c | 86
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 459
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 399
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 137
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 32
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 18
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 36
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 47
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 10
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/response_manager.c | 39
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/response_manager.h | 5
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 12
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 64
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 29
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 391
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 397
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 32
-rw-r--r-- drivers/net/ethernet/cavium/thunder/q_struct.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/common.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/adapter.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 75
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 4
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 42
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 45
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 140
-rw-r--r-- drivers/net/ethernet/ethoc.c | 4
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 1914
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.h | 45
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 184
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 8
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 45
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 23
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.h | 10
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 76
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 47
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 127
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 63
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 80
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 249
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 153
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 400
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 34
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 20
-rw-r--r-- drivers/net/ethernet/ibm/emac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/debug.c | 270
-rw-r--r-- drivers/net/ethernet/ibm/emac/debug.h | 23
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 1730
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 50
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 117
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 15
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 28
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 121
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 5
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 105
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 81
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 68
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 68
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 16
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 119
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 106
-rw-r--r-- drivers/net/ethernet/intel/i40e/Makefile | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 274
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 99
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 474
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.h | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 247
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 83
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1621
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1352
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 20
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_trace.h | 229
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 688
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 116
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 218
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 371
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/Makefile | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 99
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 220
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_trace.h | 229
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 470
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 122
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 80
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 36
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 51
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_client.c | 564
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_client.h | 166
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 135
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 242
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 57
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 21
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 81
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 203
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 943
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 38
-rw-r--r-- drivers/net/ethernet/intel/igbvf/igbvf.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igbvf/mbx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 70
-rw-r--r-- drivers/net/ethernet/intel/igbvf/vf.c | 41
-rw-r--r-- drivers/net/ethernet/intel/igbvf/vf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 39
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 16
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 82
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 202
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 75
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 572
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 304
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 23
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 155
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 27
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 33
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 44
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 107
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 1313
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_port.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 607
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 487
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 345
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2070
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 682
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 145
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 231
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 808
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 399
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 139
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 176
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 75
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib.c | 498
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib.h (renamed from drivers/net/ethernet/qlogic/qed/qed_ptp.h) | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 223
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 178
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 153
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 247
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 942
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 141
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 196
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 187
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 207
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 351
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 88
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1517
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 41
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 28
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.h | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 184
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1320
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 27
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 245
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 238
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_offload.c | 43
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 30
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 29
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 467
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 345
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 106
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 174
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c (renamed from drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h) | 94
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 424
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c | 15
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 33
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 149
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 189
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 157
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 1566
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 1727
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 81
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 48
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1110
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 59
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 185
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 82
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 373
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.h | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 113
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 149
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1346
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 225
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 102
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ptp.c | 199
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 56
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 338
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 330
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 48
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 564
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 19
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 185
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.h | 62
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 97
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 85
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 536
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 93
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 356
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 188
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c | 6
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 14
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 14
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 45
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 72
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/falcon/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 14
-rw-r--r-- drivers/net/ethernet/silan/sc92031.c | 83
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 14
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 18
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 16
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 51
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 98
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 51
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 81
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 371
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 53
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 103
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 412
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 225
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 56
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 55
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 49
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1835
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 37
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 156
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 98
-rw-r--r-- drivers/net/ethernet/sun/ldmvsw.c | 27
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 37
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 18
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.h | 1
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 99
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 86
-rw-r--r-- drivers/net/ethernet/sun/sunhme.h | 2
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 116
-rw-r--r-- drivers/net/ethernet/sun/sunvnet_common.c | 56
-rw-r--r-- drivers/net/ethernet/sun/sunvnet_common.h | 27
-rw-r--r-- drivers/net/ethernet/synopsys/Kconfig | 41
-rw-r--r-- drivers/net/ethernet/synopsys/Makefile | 10
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 737
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 644
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c | 275
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c | 3147
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 1350
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c | 78
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h | 744
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac.h | 660
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 43
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 16
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 3
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 51
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 14
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 14
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 62
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 3
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
443 files changed, 47511 insertions, 17902 deletions
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 084a6d58543a..be823c186517 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -283,7 +283,6 @@ struct typhoon {
spinlock_t command_lock ____cacheline_aligned;
struct basic_ring cmdRing;
struct basic_ring respRing;
- struct net_device_stats stats;
struct net_device_stats stats_saved;
struct typhoon_shared * shared;
dma_addr_t shared_dma;
@@ -898,7 +897,7 @@ typhoon_set_rx_mode(struct net_device *dev)
static int
typhoon_do_get_stats(struct typhoon *tp)
{
- struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *stats = &tp->dev->stats;
struct net_device_stats *saved = &tp->stats_saved;
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[7];
@@ -951,7 +950,7 @@ static struct net_device_stats *
typhoon_get_stats(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
- struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *stats = &tp->dev->stats;
struct net_device_stats *saved = &tp->stats_saved;
smp_rmb();
@@ -1991,7 +1990,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
tp->card_state = Sleeping;
smp_wmb();
typhoon_do_get_stats(tp);
- memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
+ memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
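The typhoon conversion above is part of a tree-wide cleanup (also visible in bfin_mac and nmclan_cs below): struct net_device already embeds a net_device_stats field, so a driver's private copy can be dropped. A minimal sketch of the resulting pattern, with hypothetical foo_* names not taken from this diff:

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);

	foo_sync_stats_from_hw(fp);	/* hypothetical hardware readout */
	return &dev->stats;		/* kernel-owned; no driver copy needed */
}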
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 8c08f9deef92..edae15ac0e98 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -180,5 +180,6 @@ source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 26dce5bf2c18..bf7f4502cabc 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 8c3b56198e4b..4ad5b9be3f84 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -68,13 +68,6 @@ struct net_dma_desc_tx {
};
struct bfin_mac_local {
- /*
- * these are things that the kernel wants me to keep, so users
- * can find out semi-useless statistics of how well the card is
- * performing
- */
- struct net_device_stats stats;
-
spinlock_t lock;
int wol; /* Wake On Lan */
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 9f7422ada704..d8e133ced7b8 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -34,6 +34,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
@@ -1454,11 +1455,10 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- const unsigned char *addr;
- int len;
- addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
- &len);
- if (addr != NULL && len == 6) {
+ const u8 *addr;
+
+ addr = of_get_mac_address(ofdev->dev.of_node);
+ if (addr) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
} else {
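The greth hunk above swaps an open-coded of_get_property() length check for the of_get_mac_address() helper, which validates the address property and returns NULL on failure. A sketch of the helper-based pattern, with hypothetical foo_* names (the helper itself is the real linux/of_net.h API):

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void foo_init_mac_addr(struct net_device *ndev,
			      struct device_node *np)
{
	const u8 *addr = of_get_mac_address(np);

	if (addr)
		ether_addr_copy(ndev->dev_addr, addr);
	else
		eth_hw_addr_random(ndev);	/* fall back to a random MAC */
}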
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 35f19430c84a..7c1214d78855 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -133,7 +133,7 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
int irq_idx = ENA_IO_IRQ_IDX(i);
rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- adapter->msix_entries[irq_idx].vector);
+ pci_irq_vector(adapter->pdev, irq_idx));
if (rc) {
free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
adapter->netdev->rx_cpu_rmap = NULL;
@@ -1208,13 +1208,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
- int i, msix_vecs, rc;
-
- if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
- netif_err(adapter, probe, adapter->netdev,
- "Error, MSI-X is already enabled\n");
- return -EPERM;
- }
+ int msix_vecs, rc;
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
@@ -1222,16 +1216,9 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
- adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));
-
- if (!adapter->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < msix_vecs; i++)
- adapter->msix_entries[i].entry = i;
-
- rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
- if (rc != 0) {
+ rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
+ PCI_IRQ_MSIX);
+ if (rc < 0) {
netif_err(adapter, probe, adapter->netdev,
"Failed to enable MSI-X, vectors %d rc %d\n",
msix_vecs, rc);
@@ -1248,7 +1235,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
}
adapter->msix_vecs = msix_vecs;
- set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
return 0;
}
@@ -1264,7 +1250,7 @@ static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
ena_intr_msix_mgmnt;
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
- adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
+ pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
cpu = cpumask_first(cpu_online_mask);
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
cpumask_set_cpu(cpu,
@@ -1287,7 +1273,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
adapter->irq_tbl[irq_idx].vector =
- adapter->msix_entries[irq_idx].vector;
+ pci_irq_vector(adapter->pdev, irq_idx);
adapter->irq_tbl[irq_idx].cpu = cpu;
cpumask_set_cpu(cpu,
@@ -1325,12 +1311,6 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int rc = 0, i, k;
- if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to request I/O IRQ: MSI-X is not enabled\n");
- return -EINVAL;
- }
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
irq = &adapter->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
@@ -1389,16 +1369,6 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
}
}
-static void ena_disable_msix(struct ena_adapter *adapter)
-{
- if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
- pci_disable_msix(adapter->pdev);
-
- if (adapter->msix_entries)
- vfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
-}
-
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
int i;
@@ -2479,8 +2449,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
return 0;
err_disable_msix:
- ena_disable_msix(adapter);
-
+ pci_free_irq_vectors(adapter->pdev);
return rc;
}
@@ -2518,7 +2487,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
ena_com_abort_admin_commands(ena_dev);
@@ -2569,7 +2538,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
@@ -3103,7 +3072,7 @@ err_rss:
err_free_msix:
ena_com_dev_reset(ena_dev);
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
err_worker_destroy:
ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service);
@@ -3188,7 +3157,7 @@ static void ena_remove(struct pci_dev *pdev)
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index ed62d8e231a1..0e22bce6239d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -248,7 +248,6 @@ enum ena_flags_t {
ENA_FLAG_DEVICE_RUNNING,
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
- ENA_FLAG_MSIX_ENABLED,
ENA_FLAG_TRIGGER_RESET
};
@@ -267,7 +266,6 @@ struct ena_adapter {
int num_queues;
- struct msix_entry *msix_entries;
int msix_vecs;
u32 tx_usecs, rx_usecs; /* interrupt moderation */
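The two ena hunks above convert the driver from a private msix_entry array plus pci_enable_msix() to the managed pci_alloc_irq_vectors() API, with pci_irq_vector() mapping a vector index to its Linux IRQ number. A self-contained sketch of that pattern, with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int foo_setup_msix(struct pci_dev *pdev, int nvec,
			  irq_handler_t handler, void *data)
{
	int i, rc;

	/* Ask for exactly nvec MSI-X vectors; rc < 0 means none granted. */
	rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "foo", data);
		if (rc)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);	/* replaces the open-coded disable path */
	return rc;
}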
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index b556c926557a..9c152d85840d 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -359,7 +359,6 @@ typedef struct _mace_statistics {
typedef struct _mace_private {
struct pcmcia_device *p_dev;
- struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
/* restore_multicast_list() state variables */
@@ -879,7 +878,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
service a transmit interrupt while we are in here.
*/
- lp->linux_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
lp->tx_free_frames--;
/* WARNING: Write the _exact_ number of bytes written in the header! */
@@ -967,7 +966,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
- lp->linux_stats.tx_errors++;
+ dev->stats.tx_errors++;
outb(0xFF, ioaddr + AM2150_XMT_SKIP);
}
@@ -1016,7 +1015,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
} /* if (xmtfs & MACE_XMTFS_XMTSV) */
- lp->linux_stats.tx_packets++;
+ dev->stats.tx_packets++;
lp->tx_free_frames++;
netif_wake_queue(dev);
} /* if (status & MACE_IR_XMTINT) */
@@ -1077,7 +1076,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
" 0x%X.\n", dev->name, rx_framecnt, rx_status);
if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
- lp->linux_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (rx_status & MACE_RCVFS_OFLO) {
lp->mace_stats.oflo++;
}
@@ -1114,14 +1113,14 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
- lp->linux_stats.rx_packets++;
- lp->linux_stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
continue;
} else {
pr_debug("%s: couldn't allocate a sk_buff of size"
" %d.\n", dev->name, pkt_len);
- lp->linux_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
}
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
@@ -1231,13 +1230,13 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
/* At this point, mace_stats is fully updated for this call.
- We may now update the linux_stats. */
+ We may now update the netdev stats. */
- /* The MACE has no equivalent for linux_stats field which are commented
+ /* The MACE has no equivalent for netdev stats field which are commented
out. */
- /* lp->linux_stats.multicast; */
- lp->linux_stats.collisions =
+ /* dev->stats.multicast; */
+ dev->stats.collisions =
lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
/* Collision: The MACE may retry sending a packet 15 times
before giving up. The retry count is in XMTRC.
@@ -1245,22 +1244,22 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
If so, why doesn't the RCVCC record these collisions? */
/* detailed rx_errors: */
- lp->linux_stats.rx_length_errors =
+ dev->stats.rx_length_errors =
lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
- /* lp->linux_stats.rx_over_errors */
- lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
- lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
- lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
- lp->linux_stats.rx_missed_errors =
+ /* dev->stats.rx_over_errors */
+ dev->stats.rx_crc_errors = lp->mace_stats.fcs;
+ dev->stats.rx_frame_errors = lp->mace_stats.fram;
+ dev->stats.rx_fifo_errors = lp->mace_stats.oflo;
+ dev->stats.rx_missed_errors =
lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
/* detailed tx_errors */
- lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
- lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+ dev->stats.tx_aborted_errors = lp->mace_stats.rtry;
+ dev->stats.tx_carrier_errors = lp->mace_stats.lcar;
/* LCAR usually results from bad cabling. */
- lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
- lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
- /* lp->linux_stats.tx_window_errors; */
+ dev->stats.tx_fifo_errors = lp->mace_stats.uflo;
+ dev->stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* dev->stats.tx_window_errors; */
} /* update_stats */
/* ----------------------------------------------------------------------------
@@ -1274,10 +1273,10 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev)
update_stats(dev->base_addr, dev);
pr_debug("%s: updating the statistics.\n", dev->name);
- pr_linux_stats(&lp->linux_stats);
+ pr_linux_stats(&dev->stats);
pr_mace_stats(&lp->mace_stats);
- return &lp->linux_stats;
+ return &dev->stats;
} /* net_device_stats */
/* ----------------------------------------------------------------------------
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a713abd9d03e..c772420fa41c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -118,6 +118,7 @@
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
@@ -1854,7 +1855,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
if (tc_to_netdev->type != TC_SETUP_MQPRIO)
return -EINVAL;
- tc = tc_to_netdev->tc;
+ tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ tc = tc_to_netdev->mqprio->num_tc;
if (tc > pdata->hw_feat.tc_cnt)
return -EINVAL;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 0c7088a426e9..417bdb5982a9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -115,6 +115,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/completion.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4c5b90eea4af..b672d9249539 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -114,6 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/mdio.h>
diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
index ec63d706d464..59efe5b145dd 100644
--- a/drivers/net/ethernet/apm/Kconfig
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -1 +1,2 @@
source "drivers/net/ethernet/apm/xgene/Kconfig"
+source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
index 65ce32ad1b2c..946b2a4c882d 100644
--- a/drivers/net/ethernet/apm/Makefile
+++ b/drivers/net/ethernet/apm/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_NET_XGENE) += xgene/
+obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644
index 000000000000..1205861b6318
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -0,0 +1,11 @@
+config NET_XGENE_V2
+ tristate "APM X-Gene SoC Ethernet-v2 Driver"
+ depends on HAS_DMA
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This is the Ethernet driver for the on-chip ethernet interface
+ which uses a linked list of DMA descriptor architecture (v2) for
+ APM X-Gene SoCs.
+
+ To compile this driver as a module, choose M here. This module will
+ be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644
index 000000000000..f16a2b3dde8b
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for APM X-Gene Ethernet v2 driver
+#
+
+xgene-enet-v2-objs := main.o mac.o enet.o ring.o mdio.o ethtool.o
+obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c
new file mode 100644
index 000000000000..5998da014923
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.c
@@ -0,0 +1,83 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->resources.base_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
+{
+ void __iomem *addr = pdata->resources.base_addr + offset;
+
+ return ioread32(addr);
+}
+
+int xge_port_reset(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ u32 data, wait = 10;
+
+ xge_wr_csr(pdata, ENET_CLKEN, 0x3);
+ xge_wr_csr(pdata, ENET_SRST, 0xf);
+ xge_wr_csr(pdata, ENET_SRST, 0);
+ xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 1);
+ xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 0);
+
+ do {
+ usleep_range(100, 110);
+ data = xge_rd_csr(pdata, BLOCK_MEM_RDY);
+ } while (data != MEM_RDY && wait--);
+
+ if (data != MEM_RDY) {
+ dev_err(dev, "ECC init failed: %x\n", data);
+ return -ETIMEDOUT;
+ }
+
+ xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);
+
+ return 0;
+}
+
+static void xge_traffic_resume(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
+ xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);
+
+ xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
+ xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
+}
+
+void xge_port_init(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ pdata->phy_speed = SPEED_1000;
+ xge_mac_init(pdata);
+ xge_traffic_resume(ndev);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h
new file mode 100644
index 000000000000..3fd36dc66a23
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.h
@@ -0,0 +1,45 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ENET_H__
+#define __XGENE_ENET_V2_ENET_H__
+
+#define ENET_CLKEN 0xc008
+#define ENET_SRST 0xc000
+#define ENET_SHIM 0xc010
+#define CFG_MEM_RAM_SHUTDOWN 0xd070
+#define BLOCK_MEM_RDY 0xd074
+
+#define MEM_RDY 0xffffffff
+#define DEVM_ARAUX_COH BIT(19)
+#define DEVM_AWAUX_COH BIT(3)
+
+#define CFG_FORCE_LINK_STATUS_EN 0x229c
+#define FORCE_LINK_STATUS 0x22a0
+#define CFG_LINK_AGGR_RESUME 0x27c8
+#define RX_DV_GATE_REG 0x2dfc
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val);
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset);
+int xge_port_reset(struct net_device *ndev);
+void xge_port_init(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_ENET_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
new file mode 100644
index 000000000000..b6666e418e79
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -0,0 +1,187 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+#define XGE_STAT(m) { #m, offsetof(struct xge_pdata, stats.m) }
+#define XGE_EXTD_STAT(m, n) \
+ { \
+ #m, \
+ n, \
+ 0 \
+ }
+
+static const struct xge_gstrings_stats gstrings_stats[] = {
+ XGE_STAT(rx_packets),
+ XGE_STAT(tx_packets),
+ XGE_STAT(rx_bytes),
+ XGE_STAT(tx_bytes),
+ XGE_STAT(rx_errors)
+};
+
+static struct xge_gstrings_extd_stats gstrings_extd_stats[] = {
+ XGE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64),
+ XGE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127),
+ XGE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255),
+ XGE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511),
+ XGE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K),
+ XGE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX),
+ XGE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV),
+ XGE_EXTD_STAT(rx_fcs_error_cntr, RFCS),
+ XGE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA),
+ XGE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA),
+ XGE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF),
+ XGE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF),
+ XGE_EXTD_STAT(rx_unk_opcode_cntr, RXUO),
+ XGE_EXTD_STAT(rx_align_err_cntr, RALN),
+ XGE_EXTD_STAT(rx_frame_len_err_cntr, RFLR),
+ XGE_EXTD_STAT(rx_code_err_cntr, RCDE),
+ XGE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE),
+ XGE_EXTD_STAT(rx_undersize_pkt_cntr, RUND),
+ XGE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR),
+ XGE_EXTD_STAT(rx_fragments_cntr, RFRG),
+ XGE_EXTD_STAT(rx_jabber_cntr, RJBR),
+ XGE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP),
+ XGE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA),
+ XGE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA),
+ XGE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF),
+ XGE_EXTD_STAT(tx_defer_pkt_cntr, TDFR),
+ XGE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF),
+ XGE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL),
+ XGE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL),
+ XGE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL),
+ XGE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL),
+ XGE_EXTD_STAT(tx_total_col_cntr, TNCL),
+ XGE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH),
+ XGE_EXTD_STAT(tx_drop_frame_cntr, TDRP),
+ XGE_EXTD_STAT(tx_jabber_frame_cntr, TJBR),
+ XGE_EXTD_STAT(tx_fcs_error_cntr, TFCS),
+ XGE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF),
+ XGE_EXTD_STAT(tx_oversize_frame_cntr, TOVR),
+ XGE_EXTD_STAT(tx_undersize_frame_cntr, TUND),
+ XGE_EXTD_STAT(tx_fragments_cntr, TFRG)
+};
+
+#define XGE_STATS_LEN ARRAY_SIZE(gstrings_stats)
+#define XGE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats)
+
+static void xge_mac_get_extd_stats(struct xge_pdata *pdata)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < XGE_EXTD_STATS_LEN; i++) {
+ data = xge_rd_csr(pdata, gstrings_extd_stats[i].addr);
+ gstrings_extd_stats[i].value += data;
+ }
+}
+
+static void xge_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct platform_device *pdev = pdata->pdev;
+
+ strcpy(info->driver, "xgene-enet-v2");
+ strcpy(info->version, XGENE_ENET_V2_VERSION);
+ snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
+ sprintf(info->bus_info, "%s", pdev->name);
+}
+
+static void xge_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < XGE_STATS_LEN; i++) {
+ memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < XGE_EXTD_STATS_LEN; i++) {
+ memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static int xge_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return XGE_STATS_LEN + XGE_EXTD_STATS_LEN;
+}
+
+static void xge_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *dummy,
+ u64 *data)
+{
+ void *pdata = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < XGE_STATS_LEN; i++)
+ *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+
+ xge_mac_get_extd_stats(pdata);
+
+ for (i = 0; i < XGE_EXTD_STATS_LEN; i++)
+ *data++ = gstrings_extd_stats[i].value;
+}
+
+static int xge_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_get(phydev, cmd);
+}
+
+static int xge_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_set(phydev, cmd);
+}
+
+static const struct ethtool_ops xge_ethtool_ops = {
+ .get_drvinfo = xge_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = xge_get_strings,
+ .get_sset_count = xge_get_sset_count,
+ .get_ethtool_stats = xge_get_ethtool_stats,
+ .get_link_ksettings = xge_get_link_ksettings,
+ .set_link_ksettings = xge_set_link_ksettings,
+};
+
+void xge_set_ethtool_ops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &xge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.h b/drivers/net/ethernet/apm/xgene-v2/ethtool.h
new file mode 100644
index 000000000000..54b48d5561b8
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.h
@@ -0,0 +1,78 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ETHTOOL_H__
+#define __XGENE_ENET_V2_ETHTOOL_H__
+
+struct xge_gstrings_stats {
+ char name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+struct xge_gstrings_extd_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 addr;
+ u32 value;
+};
+
+#define TR64 0xa080
+#define TR127 0xa084
+#define TR255 0xa088
+#define TR511 0xa08c
+#define TR1K 0xa090
+#define TRMAX 0xa094
+#define TRMGV 0xa098
+#define RFCS 0xa0a4
+#define RMCA 0xa0a8
+#define RBCA 0xa0ac
+#define RXCF 0xa0b0
+#define RXPF 0xa0b4
+#define RXUO 0xa0b8
+#define RALN 0xa0bc
+#define RFLR 0xa0c0
+#define RCDE 0xa0c4
+#define RCSE 0xa0c8
+#define RUND 0xa0cc
+#define ROVR 0xa0d0
+#define RFRG 0xa0d4
+#define RJBR 0xa0d8
+#define RDRP 0xa0dc
+#define TMCA 0xa0e8
+#define TBCA 0xa0ec
+#define TXPF 0xa0f0
+#define TDFR 0xa0f4
+#define TEDF 0xa0f8
+#define TSCL 0xa0fc
+#define TMCL 0xa100
+#define TLCL 0xa104
+#define TXCL 0xa108
+#define TNCL 0xa10c
+#define TPFH 0xa110
+#define TDRP 0xa114
+#define TJBR 0xa118
+#define TFCS 0xa11c
+#define TXCF 0xa120
+#define TOVR 0xa124
+#define TUND 0xa128
+#define TFRG 0xa12c
+
+void xge_set_ethtool_ops(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_ETHTOOL_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
new file mode 100644
index 000000000000..ee431e397e57
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -0,0 +1,116 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
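+/* Assert the MAC soft reset, then release it */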
+void xge_mac_reset(struct xge_pdata *pdata)
+{
+ xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET);
+ xge_wr_csr(pdata, MAC_CONFIG_1, 0);
+}
+
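+/* Reprogram interface mode, MAC mode, async-read wait states and the
+ * RGMII 125MHz clock select for the current PHY speed; anything other
+ * than 10/100 is treated as the 1G default.
+ */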
+void xge_mac_set_speed(struct xge_pdata *pdata)
+{
+ u32 icm0, icm2, ecm0, mc2;
+ u32 intf_ctrl, rgmii;
+
+ icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0);
+ icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0);
+ ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0);
+ rgmii = xge_rd_csr(pdata, RGMII_REG_0);
+ mc2 = xge_rd_csr(pdata, MAC_CONFIG_2);
+ intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL);
+ icm2 |= CFG_WAITASYNCRD_EN;
+
+ switch (pdata->phy_speed) {
+ case SPEED_10:
+ SET_REG_BITS(&mc2, INTF_MODE, 1);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 0);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 0);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+ break;
+ case SPEED_100:
+ SET_REG_BITS(&mc2, INTF_MODE, 1);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 1);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 1);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+ break;
+ default:
+ SET_REG_BITS(&mc2, INTF_MODE, 2);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 2);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 2);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 1);
+ break;
+ }
+
+ mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC;
+ SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32);
+
+ xge_wr_csr(pdata, MAC_CONFIG_2, mc2);
+ xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl);
+ xge_wr_csr(pdata, RGMII_REG_0, rgmii);
+ xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0);
+ xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2);
+ xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0);
+}
+
+void xge_mac_set_station_addr(struct xge_pdata *pdata)
+{
+ u8 *dev_addr = pdata->ndev->dev_addr;
+ u32 addr0, addr1;
+
+ addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+ addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+ xge_wr_csr(pdata, STATION_ADDR0, addr0);
+ xge_wr_csr(pdata, STATION_ADDR1, addr1);
+}
+
+void xge_mac_init(struct xge_pdata *pdata)
+{
+ xge_mac_reset(pdata);
+ xge_mac_set_speed(pdata);
+ xge_mac_set_station_addr(pdata);
+}
+
+void xge_mac_enable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+ data |= TX_EN | RX_EN;
+ xge_wr_csr(pdata, MAC_CONFIG_1, data);
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+}
+
+void xge_mac_disable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+ data &= ~(TX_EN | RX_EN);
+ xge_wr_csr(pdata, MAC_CONFIG_1, data);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h
new file mode 100644
index 000000000000..3c83fa617356
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.h
@@ -0,0 +1,107 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAC_H__
+#define __XGENE_ENET_V2_MAC_H__
+
+/* Register offsets */
+#define MAC_CONFIG_1 0xa000
+#define MAC_CONFIG_2 0xa004
+#define MII_MGMT_CONFIG 0xa020
+#define MII_MGMT_COMMAND 0xa024
+#define MII_MGMT_ADDRESS 0xa028
+#define MII_MGMT_CONTROL 0xa02c
+#define MII_MGMT_STATUS 0xa030
+#define MII_MGMT_INDICATORS 0xa034
+#define INTERFACE_CONTROL 0xa038
+#define STATION_ADDR0 0xa040
+#define STATION_ADDR1 0xa044
+
+#define RGMII_REG_0 0x27e0
+#define ICM_CONFIG0_REG_0 0x2c00
+#define ICM_CONFIG2_REG_0 0x2c08
+#define ECM_CONFIG0_REG_0 0x2d00
+
+/* Register fields */
+#define SOFT_RESET BIT(31)
+#define TX_EN BIT(0)
+#define RX_EN BIT(2)
+#define PAD_CRC BIT(2)
+#define CRC_EN BIT(1)
+#define FULL_DUPLEX BIT(0)
+
+#define INTF_MODE_POS 8
+#define INTF_MODE_LEN 2
+#define HD_MODE_POS 25
+#define HD_MODE_LEN 2
+#define CFG_MACMODE_POS 18
+#define CFG_MACMODE_LEN 2
+#define CFG_WAITASYNCRD_POS 0
+#define CFG_WAITASYNCRD_LEN 16
+#define CFG_SPEED_125_POS 24
+#define CFG_WFIFOFULLTHR_POS 0
+#define CFG_WFIFOFULLTHR_LEN 7
+#define MGMT_CLOCK_SEL_POS 0
+#define MGMT_CLOCK_SEL_LEN 3
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define MII_MGMT_BUSY BIT(0)
+#define MII_READ_CYCLE BIT(0)
+#define CFG_WAITASYNCRD_EN BIT(16)
+
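+/* Read-modify-write helpers for register bitfields: 'pos' is the
+ * field's least significant bit, 'len' its width in bits.
+ */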
+static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val)
+{
+ u32 mask = GENMASK(pos + len - 1, pos);
+
+ *var &= ~mask;
+ *var |= ((val << pos) & mask);
+}
+
+static inline u32 xgene_get_reg_bits(u32 var, int pos, int len)
+{
+ u32 mask = GENMASK(pos + len - 1, pos);
+
+ return (var & mask) >> pos;
+}
+
+#define SET_REG_BITS(var, field, val) \
+ xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val)
+
+#define SET_REG_BIT(var, field, val) \
+ xgene_set_reg_bits(var, field ## _POS, 1, val)
+
+#define GET_REG_BITS(var, field) \
+ xgene_get_reg_bits(var, field ## _POS, field ## _LEN)
+
+#define GET_REG_BIT(var, field) ((var) & (field))
+
+struct xge_pdata;
+
+void xge_mac_reset(struct xge_pdata *pdata);
+void xge_mac_set_speed(struct xge_pdata *pdata);
+void xge_mac_enable(struct xge_pdata *pdata);
+void xge_mac_disable(struct xge_pdata *pdata);
+void xge_mac_init(struct xge_pdata *pdata);
+void xge_mac_set_station_addr(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_MAC_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
new file mode 100644
index 000000000000..0f2ad50f3bd7
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -0,0 +1,759 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static const struct acpi_device_id xge_acpi_match[];
+
+static int xge_get_resources(struct xge_pdata *pdata)
+{
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ int phy_mode, ret = 0;
+ struct resource *res;
+ struct device *dev;
+
+ pdev = pdata->pdev;
+ dev = &pdev->dev;
+ ndev = pdata->ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Resource enet_csr not defined\n");
+ return -ENODEV;
+ }
+
+ pdata->resources.base_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->resources.base_addr) {
+ dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+ return -ENOMEM;
+ }
+
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ eth_hw_addr_random(ndev);
+
+ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+ phy_mode = device_get_phy_mode(dev);
+ if (phy_mode < 0) {
+ dev_err(dev, "Unable to get phy-connection-type\n");
+ return phy_mode;
+ }
+ pdata->resources.phy_mode = phy_mode;
+
+ if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
+ dev_err(dev, "Incorrect phy-connection-type specified\n");
+ return -ENODEV;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to get irq\n");
+ return ret;
+ }
+ pdata->resources.irq = ret;
+
+ return 0;
+}
+
+static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ const u8 slots = XGENE_ENET_NUM_DESC - 1;
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_raw_desc *raw_desc;
+ u64 addr_lo, addr_hi;
+ u8 tail = ring->tail;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 len;
+ int i;
+
+ for (i = 0; i < nbuf; i++) {
+ raw_desc = &ring->raw_desc[tail];
+
+ len = XGENE_ENET_STD_MTU;
+ skb = netdev_alloc_skb(ndev, len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ ring->pkt_info[tail].skb = skb;
+ ring->pkt_info[tail].dma_addr = dma_addr;
+
+ addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+ addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+ SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+ SET_BITS(PKT_ADDRH,
+ upper_32_bits(dma_addr)));
+
+ dma_wmb();
+ raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+ SET_BITS(E, 1));
+ tail = (tail + 1) & slots;
+ }
+
+ ring->tail = tail;
+
+ return 0;
+}
+
+static int xge_init_hw(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = xge_port_reset(ndev);
+ if (ret)
+ return ret;
+
+ xge_port_init(ndev);
+ pdata->nbufs = NUM_BUFS;
+
+ return 0;
+}
+
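+/* IRQ handler: mask further DMA interrupts and defer the work to NAPI;
+ * xge_napi() re-enables interrupts when it completes under budget.
+ */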
+static irqreturn_t xge_irq(const int irq, void *data)
+{
+ struct xge_pdata *pdata = data;
+
+ if (napi_schedule_prep(&pdata->napi)) {
+ xge_intr_disable(pdata);
+ __napi_schedule(&pdata->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xge_request_irq(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
+
+ ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
+ pdata);
+ if (ret)
+ netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
+
+ return ret;
+}
+
+static void xge_free_irq(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ free_irq(pdata->resources.irq, pdata);
+}
+
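+/* A Tx slot is free when E (empty) is set and PKT_SIZE still holds the
+ * software-written SLOT_EMPTY marker.
+ */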
+static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
+{
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+ (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
+ return true;
+
+ return false;
+}
+
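+/* Transmit: copy the skb into a coherent bounce buffer (the hardware
+ * seems to want 64B-aligned packet buffers, per the comment below),
+ * fill in the descriptor and kick the Tx DMA engine.
+ */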
+static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ u64 addr_lo, addr_hi;
+ void *pkt_buf;
+ u8 tail;
+ u16 len;
+
+ tx_ring = pdata->tx_ring;
+ tail = tx_ring->tail;
+ len = skb_headlen(skb);
+ raw_desc = &tx_ring->raw_desc[tail];
+
+ if (!is_tx_slot_available(raw_desc)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Packet buffers should be 64B aligned */
+ pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!pkt_buf)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ memcpy(pkt_buf, skb->data, len);
+
+ addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+ addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+ SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+ SET_BITS(PKT_ADDRH,
+ upper_32_bits(dma_addr)));
+
+ tx_ring->pkt_info[tail].skb = skb;
+ tx_ring->pkt_info[tail].dma_addr = dma_addr;
+ tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
+
+ dma_wmb();
+
+ raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+ SET_BITS(PKT_SIZE, len) |
+ SET_BITS(E, 0));
+ skb_tx_timestamp(skb);
+ xge_wr_csr(pdata, DMATXCTRL, 1);
+
+ tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
+
+ return NETDEV_TX_OK;
+}
+
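+/* Hardware signals completion by setting E and zeroing PKT_SIZE, which
+ * is distinct from the SLOT_EMPTY marker software writes back.
+ */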
+static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
+{
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+ !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
+ return true;
+
+ return false;
+}
+
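+/* Tx completion: walk from head, free each completed slot's bounce
+ * buffer and skb, restamp the slot as empty and ack DMATXSTATUS.
+ */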
+static void xge_txc_poll(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ void *pkt_buf;
+ u32 data;
+ u8 head;
+
+ tx_ring = pdata->tx_ring;
+ head = tx_ring->head;
+
+ data = xge_rd_csr(pdata, DMATXSTATUS);
+ if (!GET_BITS(TXPKTCOUNT, data))
+ return;
+
+ while (1) {
+ raw_desc = &tx_ring->raw_desc[head];
+
+ if (!is_tx_hw_done(raw_desc))
+ break;
+
+ dma_rmb();
+
+ skb = tx_ring->pkt_info[head].skb;
+ dma_addr = tx_ring->pkt_info[head].dma_addr;
+ pkt_buf = tx_ring->pkt_info[head].pkt_buf;
+ pdata->stats.tx_packets++;
+ pdata->stats.tx_bytes += skb->len;
+ dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+ dev_kfree_skb_any(skb);
+
+ /* clear pktstart address and pktsize */
+ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+ SET_BITS(PKT_SIZE, SLOT_EMPTY));
+ xge_wr_csr(pdata, DMATXSTATUS, 1);
+
+ head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+ }
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ tx_ring->head = head;
+}
+
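+/* Rx poll: consume up to 'budget' filled descriptors, pass good frames
+ * to GRO and refill one buffer per slot consumed.
+ */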
+static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *rx_ring;
+ struct xge_raw_desc *raw_desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int processed = 0;
+ u8 head, rx_error;
+ int i, ret;
+ u32 data;
+ u16 len;
+
+ rx_ring = pdata->rx_ring;
+ head = rx_ring->head;
+
+ data = xge_rd_csr(pdata, DMARXSTATUS);
+ if (!GET_BITS(RXPKTCOUNT, data))
+ return 0;
+
+ for (i = 0; i < budget; i++) {
+ raw_desc = &rx_ring->raw_desc[head];
+
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+ break;
+
+ dma_rmb();
+
+ skb = rx_ring->pkt_info[head].skb;
+ rx_ring->pkt_info[head].skb = NULL;
+ dma_addr = rx_ring->pkt_info[head].dma_addr;
+ len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+ DMA_FROM_DEVICE);
+
+ rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
+ if (unlikely(rx_error)) {
+ pdata->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ pdata->stats.rx_packets++;
+ pdata->stats.rx_bytes += len;
+ napi_gro_receive(&pdata->napi, skb);
+out:
+ ret = xge_refill_buffers(ndev, 1);
+ xge_wr_csr(pdata, DMARXSTATUS, 1);
+ xge_wr_csr(pdata, DMARXCTRL, 1);
+
+ if (ret)
+ break;
+
+ head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+ processed++;
+ }
+
+ rx_ring->head = head;
+
+ return processed;
+}
+
+static void xge_delete_desc_ring(struct net_device *ndev,
+ struct xge_desc_ring *ring)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ u16 size;
+
+ if (!ring)
+ return;
+
+ size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+ if (ring->desc_addr)
+ dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
+
+ kfree(ring->pkt_info);
+ kfree(ring);
+}
+
+static void xge_free_buffers(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ struct device *dev = &pdata->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ skb = ring->pkt_info[i].skb;
+ dma_addr = ring->pkt_info[i].dma_addr;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void xge_delete_desc_rings(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_txc_poll(ndev);
+ xge_delete_desc_ring(ndev, pdata->tx_ring);
+
+ xge_rx_poll(ndev, 64);
+ xge_free_buffers(ndev);
+ xge_delete_desc_ring(ndev, pdata->rx_ring);
+}
+
+static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *ring;
+ u16 size;
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ ring->ndev = ndev;
+
+ size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+ ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
+ GFP_KERNEL);
+ if (!ring->desc_addr)
+ goto err;
+
+ ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
+ GFP_KERNEL);
+ if (!ring->pkt_info)
+ goto err;
+
+ xge_setup_desc(ring);
+
+ return ring;
+
+err:
+ xge_delete_desc_ring(ndev, ring);
+
+ return NULL;
+}
+
+static int xge_create_desc_rings(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring;
+ int ret;
+
+ /* create tx ring */
+ ring = xge_create_desc_ring(ndev);
+ if (!ring)
+ goto err;
+
+ pdata->tx_ring = ring;
+ xge_update_tx_desc_addr(pdata);
+
+ /* create rx ring */
+ ring = xge_create_desc_ring(ndev);
+ if (!ring)
+ goto err;
+
+ pdata->rx_ring = ring;
+ xge_update_rx_desc_addr(pdata);
+
+ ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ xge_delete_desc_rings(ndev);
+
+ return -ENOMEM;
+}
+
+static int xge_open(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = xge_create_desc_rings(ndev);
+ if (ret)
+ return ret;
+
+ napi_enable(&pdata->napi);
+ ret = xge_request_irq(ndev);
+ if (ret)
+ return ret;
+
+ xge_intr_enable(pdata);
+ xge_wr_csr(pdata, DMARXCTRL, 1);
+
+ phy_start(ndev->phydev);
+ xge_mac_enable(pdata);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int xge_close(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ xge_mac_disable(pdata);
+ phy_stop(ndev->phydev);
+
+ xge_intr_disable(pdata);
+ xge_free_irq(ndev);
+ napi_disable(&pdata->napi);
+ xge_delete_desc_rings(ndev);
+
+ return 0;
+}
+
+static int xge_napi(struct napi_struct *napi, const int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct xge_pdata *pdata;
+ int processed;
+
+ pdata = netdev_priv(ndev);
+
+ xge_txc_poll(ndev);
+ processed = xge_rx_poll(ndev, budget);
+
+ if (processed < budget) {
+ napi_complete_done(napi, processed);
+ xge_intr_enable(pdata);
+ }
+
+ return processed;
+}
+
+static int xge_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ return ret;
+
+ xge_mac_set_station_addr(pdata);
+
+ return 0;
+}
+
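+/* Slots with E cleared are still owned by hardware, i.e. queued but
+ * not yet transmitted.
+ */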
+static bool is_tx_pending(struct xge_raw_desc *raw_desc)
+{
+ if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+ return true;
+
+ return false;
+}
+
+static void xge_free_pending_skb(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ void *pkt_buf;
+ int i;
+
+ tx_ring = pdata->tx_ring;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ raw_desc = &tx_ring->raw_desc[i];
+
+ if (!is_tx_pending(raw_desc))
+ continue;
+
+ skb = tx_ring->pkt_info[i].skb;
+ dma_addr = tx_ring->pkt_info[i].dma_addr;
+ pkt_buf = tx_ring->pkt_info[i].pkt_buf;
+ dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void xge_timeout(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ rtnl_lock();
+
+ if (!netif_running(ndev))
+ goto out;
+
+ netif_stop_queue(ndev);
+ xge_intr_disable(pdata);
+ napi_disable(&pdata->napi);
+
+ xge_wr_csr(pdata, DMATXCTRL, 0);
+ xge_txc_poll(ndev);
+ xge_free_pending_skb(ndev);
+ xge_wr_csr(pdata, DMATXSTATUS, ~0U);
+
+ xge_setup_desc(pdata->tx_ring);
+ xge_update_tx_desc_addr(pdata);
+ xge_mac_init(pdata);
+
+ napi_enable(&pdata->napi);
+ xge_intr_enable(pdata);
+ xge_mac_enable(pdata);
+ netif_start_queue(ndev);
+
+out:
+ rtnl_unlock();
+}
+
+static void xge_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_stats *stats = &pdata->stats;
+
+ storage->tx_packets += stats->tx_packets;
+ storage->tx_bytes += stats->tx_bytes;
+
+ storage->rx_packets += stats->rx_packets;
+ storage->rx_bytes += stats->rx_bytes;
+ storage->rx_errors += stats->rx_errors;
+}
+
+static const struct net_device_ops xgene_ndev_ops = {
+ .ndo_open = xge_open,
+ .ndo_stop = xge_close,
+ .ndo_start_xmit = xge_start_xmit,
+ .ndo_set_mac_address = xge_set_mac_addr,
+ .ndo_tx_timeout = xge_timeout,
+ .ndo_get_stats64 = xge_get_stats64,
+};
+
+static int xge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct xge_pdata *pdata;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*pdata));
+ if (!ndev)
+ return -ENOMEM;
+
+ pdata = netdev_priv(ndev);
+
+ pdata->pdev = pdev;
+ pdata->ndev = ndev;
+ SET_NETDEV_DEV(ndev, dev);
+ platform_set_drvdata(pdev, pdata);
+ ndev->netdev_ops = &xgene_ndev_ops;
+
+ ndev->features |= NETIF_F_GSO |
+ NETIF_F_GRO;
+
+ ret = xge_get_resources(pdata);
+ if (ret)
+ goto err;
+
+ ndev->hw_features = ndev->features;
+ xge_set_ethtool_ops(ndev);
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ netdev_err(ndev, "No usable DMA configuration\n");
+ goto err;
+ }
+
+ ret = xge_init_hw(ndev);
+ if (ret)
+ goto err;
+
+ ret = xge_mdio_config(ndev);
+ if (ret)
+ goto err;
+
+ netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to register netdev\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int xge_remove(struct platform_device *pdev)
+{
+ struct xge_pdata *pdata;
+ struct net_device *ndev;
+
+ pdata = platform_get_drvdata(pdev);
+ ndev = pdata->ndev;
+
+ rtnl_lock();
+ if (netif_running(ndev))
+ dev_close(ndev);
+ rtnl_unlock();
+
+ xge_mdio_remove(ndev);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static void xge_shutdown(struct platform_device *pdev)
+{
+ struct xge_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata)
+ return;
+
+ if (!pdata->ndev)
+ return;
+
+ xge_remove(pdev);
+}
+
+static const struct acpi_device_id xge_acpi_match[] = {
+ { "APMC0D80" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
+
+static struct platform_driver xge_driver = {
+ .driver = {
+ .name = "xgene-enet-v2",
+ .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ },
+ .probe = xge_probe,
+ .remove = xge_remove,
+ .shutdown = xge_shutdown,
+};
+module_platform_driver(xge_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_VERSION(XGENE_ENET_V2_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
new file mode 100644
index 000000000000..969b258cb7de
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -0,0 +1,80 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAIN_H__
+#define __XGENE_ENET_V2_MAIN_H__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/prefetch.h>
+#include <linux/phy.h>
+#include <net/ip.h>
+#include "mac.h"
+#include "enet.h"
+#include "ring.h"
+#include "ethtool.h"
+
+#define XGENE_ENET_V2_VERSION "v1.0"
+#define XGENE_ENET_STD_MTU 1536
+#define XGENE_ENET_MIN_FRAME 60
+#define IRQ_ID_SIZE 16
+
+struct xge_resource {
+ void __iomem *base_addr;
+ int phy_mode;
+ u32 irq;
+};
+
+struct xge_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+};
+
+/* ethernet private data */
+struct xge_pdata {
+ struct xge_resource resources;
+ struct xge_desc_ring *tx_ring;
+ struct xge_desc_ring *rx_ring;
+ struct platform_device *pdev;
+ char irq_name[IRQ_ID_SIZE];
+ struct mii_bus *mdio_bus;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct xge_stats stats;
+ int phy_speed;
+ u8 nbufs;
+};
+
+int xge_mdio_config(struct net_device *ndev);
+void xge_mdio_remove(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
new file mode 100644
index 000000000000..f5fe3bb2e59d
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -0,0 +1,168 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
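+/* MII management write: latch the PHY/register address, write the data
+ * and poll MII_MGMT_INDICATORS until BUSY clears (bounded by ~10 polls).
+ */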
+static int xge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+{
+ struct xge_pdata *pdata = bus->priv;
+ u32 done, val = 0;
+ u8 wait = 10;
+
+ SET_REG_BITS(&val, PHY_ADDR, phy_id);
+ SET_REG_BITS(&val, REG_ADDR, reg);
+ xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+ xge_wr_csr(pdata, MII_MGMT_CONTROL, data);
+ do {
+ usleep_range(5, 10);
+ done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+ } while ((done & MII_MGMT_BUSY) && wait--);
+
+ if (done & MII_MGMT_BUSY) {
+ dev_err(&bus->dev, "MII_MGMT write failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int xge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct xge_pdata *pdata = bus->priv;
+ u32 data, done, val = 0;
+ u8 wait = 10;
+
+ SET_REG_BITS(&val, PHY_ADDR, phy_id);
+ SET_REG_BITS(&val, REG_ADDR, reg);
+ xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+ xge_wr_csr(pdata, MII_MGMT_COMMAND, MII_READ_CYCLE);
+ do {
+ usleep_range(5, 10);
+ done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+ } while ((done & MII_MGMT_BUSY) && wait--);
+
+ if (done & MII_MGMT_BUSY) {
+ dev_err(&bus->dev, "MII_MGMT read failed\n");
+ return -ETIMEDOUT;
+ }
+
+ data = xge_rd_csr(pdata, MII_MGMT_STATUS);
+ xge_wr_csr(pdata, MII_MGMT_COMMAND, 0);
+
+ return data;
+}
+
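+/* phylib link-change callback: reprogram the MAC for the new speed on
+ * link-up, quiesce the MAC on link-down.
+ */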
+static void xge_adjust_link(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (phydev->link) {
+ if (pdata->phy_speed != phydev->speed) {
+ pdata->phy_speed = phydev->speed;
+ xge_mac_set_speed(pdata);
+ xge_mac_enable(pdata);
+ phy_print_status(phydev);
+ }
+ } else {
+ if (pdata->phy_speed != SPEED_UNKNOWN) {
+ pdata->phy_speed = SPEED_UNKNOWN;
+ xge_mac_disable(pdata);
+ phy_print_status(phydev);
+ }
+ }
+}
+
+void xge_mdio_remove(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct mii_bus *mdio_bus = pdata->mdio_bus;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+
+ if (mdio_bus->state == MDIOBUS_REGISTERED)
+ mdiobus_unregister(mdio_bus);
+
+ mdiobus_free(mdio_bus);
+}
+
+int xge_mdio_config(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct mii_bus *mdio_bus;
+ struct phy_device *phydev;
+ int ret;
+
+ mdio_bus = mdiobus_alloc();
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->name = "APM X-Gene Ethernet (v2) MDIO Bus";
+ mdio_bus->read = xge_mdio_read;
+ mdio_bus->write = xge_mdio_write;
+ mdio_bus->priv = pdata;
+ mdio_bus->parent = dev;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+ pdata->mdio_bus = mdio_bus;
+
+ mdio_bus->phy_mask = 0x1;
+ ret = mdiobus_register(mdio_bus);
+ if (ret)
+ goto err;
+
+ phydev = phy_find_first(mdio_bus);
+ if (!phydev) {
+ dev_err(dev, "no PHY found\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ phydev = phy_connect(ndev, phydev_name(phydev),
+ &xge_adjust_link,
+ pdata->resources.phy_mode);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ ret = PTR_ERR(phydev);
+ goto err;
+ }
+
+ phydev->supported &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_AUI |
+ SUPPORTED_MII |
+ SUPPORTED_FIBRE |
+ SUPPORTED_BNC);
+ phydev->advertising = phydev->supported;
+ pdata->phy_speed = SPEED_UNKNOWN;
+
+ return 0;
+err:
+ xge_mdio_remove(ndev);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c
new file mode 100644
index 000000000000..38810828f8f0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.c
@@ -0,0 +1,81 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+/* create circular linked list of descriptors */
+void xge_setup_desc(struct xge_desc_ring *ring)
+{
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_h, next_dma;
+ u16 offset;
+ int i;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ raw_desc = &ring->raw_desc[i];
+
+ offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
+ next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
+
+ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+ SET_BITS(PKT_SIZE, SLOT_EMPTY));
+ dma_h = upper_32_bits(next_dma);
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
+ SET_BITS(NEXT_DESC_ADDRH, dma_h));
+ }
+}
+
+void xge_update_tx_desc_addr(struct xge_pdata *pdata)
+{
+ struct xge_desc_ring *ring = pdata->tx_ring;
+ dma_addr_t dma_addr = ring->dma_addr;
+
+ xge_wr_csr(pdata, DMATXDESCL, dma_addr);
+ xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+void xge_update_rx_desc_addr(struct xge_pdata *pdata)
+{
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ dma_addr_t dma_addr = ring->dma_addr;
+
+ xge_wr_csr(pdata, DMARXDESCL, dma_addr);
+ xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
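+/* DMAINTRMASK doubles as the interrupt enable: set bits unmask the
+ * corresponding events, zero masks everything.
+ */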
+void xge_intr_enable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = RX_PKT_RCVD | TX_PKT_SENT;
+ xge_wr_csr(pdata, DMAINTRMASK, data);
+}
+
+void xge_intr_disable(struct xge_pdata *pdata)
+{
+ xge_wr_csr(pdata, DMAINTRMASK, 0);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h
new file mode 100644
index 000000000000..abc8c9a84954
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.h
@@ -0,0 +1,119 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_RING_H__
+#define __XGENE_ENET_V2_RING_H__
+
+#define XGENE_ENET_DESC_SIZE 64
+#define XGENE_ENET_NUM_DESC 256
+#define NUM_BUFS 8
+#define SLOT_EMPTY 0xfff
+
+#define DMATXCTRL 0xa180
+#define DMATXDESCL 0xa184
+#define DMATXDESCH 0xa1a0
+#define DMATXSTATUS 0xa188
+#define DMARXCTRL 0xa18c
+#define DMARXDESCL 0xa190
+#define DMARXDESCH 0xa1a4
+#define DMARXSTATUS 0xa194
+#define DMAINTRMASK 0xa198
+#define DMAINTERRUPT 0xa19c
+
+#define D_POS 62
+#define D_LEN 2
+#define E_POS 63
+#define E_LEN 1
+#define PKT_ADDRL_POS 0
+#define PKT_ADDRL_LEN 32
+#define PKT_ADDRH_POS 32
+#define PKT_ADDRH_LEN 10
+#define PKT_SIZE_POS 32
+#define PKT_SIZE_LEN 12
+#define NEXT_DESC_ADDRL_POS 0
+#define NEXT_DESC_ADDRL_LEN 32
+#define NEXT_DESC_ADDRH_POS 48
+#define NEXT_DESC_ADDRH_LEN 10
+
+#define TXPKTCOUNT_POS 16
+#define TXPKTCOUNT_LEN 8
+#define RXPKTCOUNT_POS 16
+#define RXPKTCOUNT_LEN 8
+
+#define TX_PKT_SENT BIT(0)
+#define TX_BUS_ERROR BIT(3)
+#define RX_PKT_RCVD BIT(4)
+#define RX_BUS_ERROR BIT(7)
+#define RXSTATUS_RXPKTRCVD BIT(0)
+
+struct xge_raw_desc {
+ __le64 m0;
+ __le64 m1;
+ __le64 m2;
+ __le64 m3;
+ __le64 m4;
+ __le64 m5;
+ __le64 m6;
+ __le64 m7;
+};
+
+struct pkt_info {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *pkt_buf;
+};
+
+/* software context of a descriptor ring */
+struct xge_desc_ring {
+ struct net_device *ndev;
+ dma_addr_t dma_addr;
+ u8 head;
+ u8 tail;
+ union {
+ void *desc_addr;
+ struct xge_raw_desc *raw_desc;
+ };
+ struct pkt_info *pkt_info;
+};
+
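+/* Descriptor words are little-endian u64s; these helpers pack/extract
+ * 'len'-bit fields at bit position 'pos'.
+ */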
+static inline u64 xge_set_desc_bits(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+static inline u64 xge_get_desc_bits(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define SET_BITS(field, val) \
+ xge_set_desc_bits(field ## _POS, field ## _LEN, val)
+
+#define GET_BITS(field, src) \
+ xge_get_desc_bits(field ## _POS, field ## _LEN, src)
+
+void xge_setup_desc(struct xge_desc_ring *ring);
+void xge_update_tx_desc_addr(struct xge_pdata *pdata);
+void xge_update_rx_desc_addr(struct xge_pdata *pdata);
+void xge_intr_enable(struct xge_pdata *pdata);
+void xge_intr_disable(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_RING_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 06e681697c17..2a835e07adfb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -494,7 +494,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
break;
}
- mc2 |= FULL_DUPLEX2 | PAD_CRC;
+ mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
@@ -623,6 +623,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+ CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 5f83037bb96b..d250bfe94d24 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -163,6 +163,7 @@ enum xgene_enet_rm {
#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
+#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
#define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4)
@@ -215,6 +216,7 @@ enum xgene_enet_rm {
#define ENET_GHD_MODE BIT(26)
#define FULL_DUPLEX2 BIT(0)
#define PAD_CRC BIT(2)
+#define LENGTH_CHK BIT(4)
#define SCAN_AUTO_INCR BIT(5)
#define TBYT_ADDR 0x38
#define TPKT_ADDR 0x39
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index b3568c453b14..5f37ed3506d5 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -601,14 +601,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void xgene_enet_skip_csum(struct sk_buff *skb)
+static void xgene_enet_rx_csum(struct sk_buff *skb)
{
+ struct net_device *ndev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
- if (!ip_is_fragment(iph) ||
- (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ if (ip_is_fragment(iph))
+ return;
+
+ if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+ return;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
@@ -648,12 +658,24 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
buf_pool->head = head;
}
+/* Errata 10GE_8 and ENET_11 - allow packets with length <= 64B */
+static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
+{
+ if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
+ if (ntohs(eth_hdr(skb)->h_proto) < 46)
+ return true;
+ }
+
+ return false;
+}
+
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
struct xgene_enet_raw_desc *raw_desc,
struct xgene_enet_raw_desc *exp_desc)
{
struct xgene_enet_desc_ring *buf_pool, *page_pool;
u32 datalen, frag_size, skb_index;
+ struct xgene_enet_pdata *pdata;
struct net_device *ndev;
dma_addr_t dma_addr;
struct sk_buff *skb;
@@ -666,6 +688,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
bool nv;
ndev = rx_ring->ndev;
+ pdata = netdev_priv(ndev);
dev = ndev_to_dev(rx_ring->ndev);
buf_pool = rx_ring->buf_pool;
page_pool = rx_ring->page_pool;
@@ -676,30 +699,29 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
skb = buf_pool->rx_skb[skb_index];
buf_pool->rx_skb[skb_index] = NULL;
+ datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+ skb_put(skb, datalen);
+ prefetch(skb->data - NET_IP_ALIGN);
+ skb->protocol = eth_type_trans(skb, ndev);
+
/* checking for error */
- status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
- if (unlikely(status > 2)) {
- dev_kfree_skb_any(skb);
- xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
- xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
- status);
- ret = -EIO;
- goto out;
+ if (unlikely(status)) {
+ if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ dev_kfree_skb_any(skb);
+ xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
+ xgene_enet_parse_error(rx_ring, pdata, status);
+ goto out;
+ }
}
- /* strip off CRC as HW isn't doing this */
- datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
-
nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv)
+ if (!nv) {
+ /* strip off CRC as HW isn't doing this */
datalen -= 4;
-
- skb_put(skb, datalen);
- prefetch(skb->data - NET_IP_ALIGN);
-
- if (!nv)
goto skip_jumbo;
+ }
slots = page_pool->slots - 1;
head = page_pool->head;
@@ -728,11 +750,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
skip_jumbo:
skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, ndev);
- if (likely((ndev->features & NETIF_F_IP_CSUM) &&
- skb->protocol == htons(ETH_P_IP))) {
- xgene_enet_skip_csum(skb);
- }
+ xgene_enet_rx_csum(skb);
rx_ring->rx_packets++;
rx_ring->rx_bytes += datalen;
@@ -2039,7 +2057,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_setup_ops(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
- ndev->features |= NETIF_F_TSO;
+ ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 52571741da9f..0d4be2425ebc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,6 +41,7 @@
#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
+#define ETHER_MIN_PACKET 64
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MAX_MTU 9600
#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index ece19e6d68e3..423240c97d39 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -341,8 +341,15 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+ /* Errata 10GE_1 - FIFO threshold default value incorrect */
+ RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
+ /* Errata 10GE_1 - FIFO threshold default value incorrect */
+ xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
+ RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
+ xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);
+
xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
data |= BIT(12);
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 03b847ad8937..e644a429ebf4 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -65,6 +65,11 @@
#define XG_DEF_PAUSE_THRES 0x390
#define XG_DEF_PAUSE_OFF_THRES 0x2c0
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
+#define XG_RSIF_CLE_BUFF_THRESH 0x3
+#define RSIF_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 3)
+#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8
+#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1
+#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 23873395f100..68de2f2652f2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -434,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev)
/* Enable EMAC */
arc_reg_or(priv, R_CTRL, EN_MASK);
- phy_start_aneg(ndev->phydev);
+ phy_start(ndev->phydev);
netif_start_queue(ndev);
@@ -556,6 +556,8 @@ static int arc_emac_stop(struct net_device *ndev)
napi_disable(&priv->napi);
netif_stop_queue(ndev);
+ phy_stop(ndev->phydev);
+
/* Disable interrupts */
arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d4a409139ea2..78c5de467426 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -102,9 +102,6 @@ struct alx_napi {
#define ALX_MAX_NAPIS 8
-#define ALX_FLAG_USING_MSIX BIT(0)
-#define ALX_FLAG_USING_MSI BIT(1)
-
struct alx_priv {
struct net_device *dev;
@@ -112,7 +109,6 @@ struct alx_priv {
/* msi-x vectors */
int num_vec;
- struct msix_entry *msix_entries;
/* all descriptor memory */
struct {
@@ -139,8 +135,6 @@ struct alx_priv {
u16 msg_enable;
- int flags;
-
/* protects hw.stats */
spinlock_t stats_lock;
};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6a27c2662675..a8c2db881b75 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -314,7 +314,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
napi_complete_done(&np->napi, work);
/* enable interrupt */
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
alx_mask_msix(hw, np->vec_idx, false);
} else {
spin_lock_irqsave(&alx->irq_lock, flags);
@@ -811,7 +811,7 @@ static void alx_config_vector_mapping(struct alx_priv *alx)
u32 tbl[2] = {0, 0};
int i, vector, idx, shift;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
/* tx mappings */
for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
idx = txq_vec_mapping_shift[i * 2];
@@ -828,29 +828,19 @@ static void alx_config_vector_mapping(struct alx_priv *alx)
alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
-static bool alx_enable_msix(struct alx_priv *alx)
+static int alx_enable_msix(struct alx_priv *alx)
{
- int i, err, num_vec, num_txq, num_rxq;
+ int err, num_vec, num_txq, num_rxq;
num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
num_rxq = 1;
num_vec = max_t(int, num_txq, num_rxq) + 1;
- alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!alx->msix_entries) {
- netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
- return false;
- }
-
- for (i = 0; i < num_vec; i++)
- alx->msix_entries[i].entry = i;
-
- err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
+ err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
+ PCI_IRQ_MSIX);
if (err) {
- kfree(alx->msix_entries);
netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
- return false;
+ return err;
}
alx->num_vec = num_vec;
@@ -858,7 +848,7 @@ static bool alx_enable_msix(struct alx_priv *alx)
alx->num_txq = num_txq;
alx->num_rxq = num_rxq;
- return true;
+ return err;
}
static int alx_request_msix(struct alx_priv *alx)
@@ -866,7 +856,7 @@ static int alx_request_msix(struct alx_priv *alx)
struct net_device *netdev = alx->dev;
int i, err, vector = 0, free_vector = 0;
- err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
+ err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
0, netdev->name, alx);
if (err)
goto out_err;
@@ -889,7 +879,7 @@ static int alx_request_msix(struct alx_priv *alx)
sprintf(np->irq_lbl, "%s-unused", netdev->name);
np->vec_idx = vector;
- err = request_irq(alx->msix_entries[vector].vector,
+ err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
alx_intr_msix_ring, 0, np->irq_lbl, np);
if (err)
goto out_free;
@@ -897,47 +887,31 @@ static int alx_request_msix(struct alx_priv *alx)
return 0;
out_free:
- free_irq(alx->msix_entries[free_vector++].vector, alx);
+ free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);
vector--;
for (i = 0; i < vector; i++)
- free_irq(alx->msix_entries[free_vector++].vector,
+ free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
alx->qnapi[i]);
out_err:
return err;
}
-static void alx_init_intr(struct alx_priv *alx, bool msix)
+static int alx_init_intr(struct alx_priv *alx)
{
- if (msix) {
- if (alx_enable_msix(alx))
- alx->flags |= ALX_FLAG_USING_MSIX;
- }
+ int ret;
- if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
- alx->num_vec = 1;
- alx->num_napi = 1;
- alx->num_txq = 1;
- alx->num_rxq = 1;
-
- if (!pci_enable_msi(alx->hw.pdev))
- alx->flags |= ALX_FLAG_USING_MSI;
- }
-}
-
-static void alx_disable_advanced_intr(struct alx_priv *alx)
-{
- if (alx->flags & ALX_FLAG_USING_MSIX) {
- kfree(alx->msix_entries);
- pci_disable_msix(alx->hw.pdev);
- alx->flags &= ~ALX_FLAG_USING_MSIX;
- }
+ ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (ret)
+ return ret;
- if (alx->flags & ALX_FLAG_USING_MSI) {
- pci_disable_msi(alx->hw.pdev);
- alx->flags &= ~ALX_FLAG_USING_MSI;
- }
+ alx->num_vec = 1;
+ alx->num_napi = 1;
+ alx->num_txq = 1;
+ alx->num_rxq = 1;
+ return 0;
}
static void alx_irq_enable(struct alx_priv *alx)
@@ -950,10 +924,11 @@ static void alx_irq_enable(struct alx_priv *alx)
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_post_write(hw);
- if (alx->flags & ALX_FLAG_USING_MSIX)
+ if (alx->hw.pdev->msix_enabled) {
/* enable all msix irqs */
for (i = 0; i < alx->num_vec; i++)
alx_mask_msix(hw, i, false);
+ }
}
static void alx_irq_disable(struct alx_priv *alx)
@@ -965,13 +940,13 @@ static void alx_irq_disable(struct alx_priv *alx)
alx_write_mem32(hw, ALX_IMR, 0);
alx_post_write(hw);
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
for (i = 0; i < alx->num_vec; i++) {
alx_mask_msix(hw, i, true);
- synchronize_irq(alx->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
}
} else {
- synchronize_irq(alx->hw.pdev->irq);
+ synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
}
}
@@ -981,8 +956,11 @@ static int alx_realloc_resources(struct alx_priv *alx)
alx_free_rings(alx);
alx_free_napis(alx);
- alx_disable_advanced_intr(alx);
- alx_init_intr(alx, false);
+ pci_free_irq_vectors(alx->hw.pdev);
+
+ err = alx_init_intr(alx);
+ if (err)
+ return err;
err = alx_alloc_napis(alx);
if (err)
@@ -1004,7 +982,7 @@ static int alx_request_irq(struct alx_priv *alx)
msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
err = alx_request_msix(alx);
if (!err)
@@ -1016,20 +994,20 @@ static int alx_request_irq(struct alx_priv *alx)
goto out;
}
- if (alx->flags & ALX_FLAG_USING_MSI) {
+ if (alx->hw.pdev->msi_enabled) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
msi_ctrl | ALX_MSI_MASK_SEL_LINE);
- err = request_irq(pdev->irq, alx_intr_msi, 0,
+ err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0,
alx->dev->name, alx);
if (!err)
goto out;
+
/* fall back to legacy interrupt */
- alx->flags &= ~ALX_FLAG_USING_MSI;
- pci_disable_msi(alx->hw.pdev);
+ pci_free_irq_vectors(alx->hw.pdev);
}
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
- err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+ err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED,
alx->dev->name, alx);
out:
if (!err)
@@ -1042,18 +1020,15 @@ out:
static void alx_free_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
- int i, vector = 0;
+ int i;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
- free_irq(alx->msix_entries[vector++].vector, alx);
+ free_irq(pci_irq_vector(pdev, 0), alx);
+ if (alx->hw.pdev->msix_enabled) {
for (i = 0; i < alx->num_napi; i++)
- free_irq(alx->msix_entries[vector++].vector,
- alx->qnapi[i]);
- } else {
- free_irq(pdev->irq, alx);
+ free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
}
- alx_disable_advanced_intr(alx);
+ pci_free_irq_vectors(pdev);
}
static int alx_identify_hw(struct alx_priv *alx)
@@ -1221,7 +1196,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
{
int err;
- alx_init_intr(alx, true);
+ err = alx_enable_msix(alx);
+ if (err < 0) {
+ err = alx_init_intr(alx);
+ if (err)
+ return err;
+ }
if (!resume)
netif_carrier_off(alx->dev);
@@ -1264,7 +1244,7 @@ out_free_rings:
alx_free_rings(alx);
alx_free_napis(alx);
out_disable_adv_intr:
- alx_disable_advanced_intr(alx);
+ pci_free_irq_vectors(alx->hw.pdev);
return err;
}
@@ -1637,11 +1617,11 @@ static void alx_poll_controller(struct net_device *netdev)
struct alx_priv *alx = netdev_priv(netdev);
int i;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
alx_intr_msix_misc(0, alx);
for (i = 0; i < alx->num_txq; i++)
alx_intr_msix_ring(0, alx->qnapi[i]);
- } else if (alx->flags & ALX_FLAG_USING_MSI)
+ } else if (alx->hw.pdev->msi_enabled)
alx_intr_msi(0, alx);
else
alx_intr_legacy(0, alx);
@@ -1783,7 +1763,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &alx_netdev_ops;
netdev->ethtool_ops = &alx_ethtool_ops;
- netdev->irq = pdev->irq;
+ netdev->irq = pci_irq_vector(pdev, 0);
netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 022772e1e249..83d2db2abb45 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1886,7 +1886,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
buffer_info->skb = skb;
buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
+ offset = offset_in_page(skb->data);
buffer_info->dma = pci_map_page(pdev, page, offset,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
@@ -2230,7 +2230,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
+ offset = offset_in_page(skb->data);
buffer_info->dma = pci_map_page(adapter->pdev, page,
offset, hdr_len,
PCI_DMA_TODEVICE);
@@ -2254,9 +2254,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
data_len -= buffer_info->length;
page = virt_to_page(skb->data +
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
- offset = (unsigned long)(skb->data +
- (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
- ~PAGE_MASK;
+ offset = offset_in_page(skb->data +
+ (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
buffer_info->dma = pci_map_page(adapter->pdev,
page, offset, buffer_info->length,
PCI_DMA_TODEVICE);
@@ -2268,7 +2267,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* not TSO */
buffer_info->length = buf_len;
page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
+ offset = offset_in_page(skb->data);
buffer_info->dma = pci_map_page(adapter->pdev, page,
offset, buf_len, PCI_DMA_TODEVICE);
if (++next_to_use == tpd_ring->count)
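
The atl1 hunks above swap the open-coded "(unsigned long)ptr & ~PAGE_MASK" for offset_in_page(), which is the same arithmetic behind a name. A standalone userspace rendition of that macro, assuming 4 KiB pages for illustration:

/* Userspace demo of the offset_in_page() arithmetic; in the kernel the
 * macro is ((unsigned long)(p) & ~PAGE_MASK) from linux/mm.h.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define offset_in_page(p) ((unsigned long)(uintptr_t)(p) & ~PAGE_MASK)

int main(void)
{
	char *addr = (char *)(uintptr_t)0x12345678;

	/* prints 0x678: the byte offset of addr within its page */
	printf("offset = 0x%lx\n", offset_in_page(addr));
	return 0;
}
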
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 940fb24bba21..96413808c726 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -109,7 +109,6 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
- select HWMON
imply PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -117,6 +116,13 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
+config TIGON3_HWMON
+ bool "Broadcom Tigon3 HWMON support"
+ default y
+ depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor on Tigon3 devices.
+
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a68d4889f5db..099b374c1b17 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -22,6 +22,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -284,6 +285,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+ /* Per TX-queue statistics are dynamically appended */
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -338,7 +340,8 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
continue;
j++;
}
- return j;
+ /* Include per-queue statistics */
+ return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
default:
return -EOPNOTSUPP;
}
@@ -349,6 +352,7 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
+ char buf[128];
int i, j;
switch (stringset) {
@@ -363,6 +367,18 @@ static void bcm_sysport_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
j++;
}
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ snprintf(buf, sizeof(buf), "txq%d_packets", i);
+ memcpy(data + j * ETH_GSTRING_LEN, buf,
+ ETH_GSTRING_LEN);
+ j++;
+
+ snprintf(buf, sizeof(buf), "txq%d_bytes", i);
+ memcpy(data + j * ETH_GSTRING_LEN, buf,
+ ETH_GSTRING_LEN);
+ j++;
+ }
break;
default:
break;
@@ -418,6 +434,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *ring;
int i, j;
if (netif_running(dev))
@@ -436,6 +453,22 @@ static void bcm_sysport_get_stats(struct net_device *dev,
data[j] = *(unsigned long *)p;
j++;
}
+
+ /* For SYSTEMPORT Lite since we have holes in our statistics, j would
+ * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
+ * needs to point to how many total statistics we have minus the
+ * number of per TX queue statistics
+ */
+ j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
+ dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ring = &priv->tx_rings[i];
+ data[j] = ring->packets;
+ j++;
+ data[j] = ring->bytes;
+ j++;
+ }
}
static void bcm_sysport_get_wol(struct net_device *dev,
@@ -637,6 +670,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
u16 len, status;
struct bcm_rsb *rsb;
+ /* Clear status before servicing to reduce spurious interrupts */
+ intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
+
/* Determine how much we should process since last call; SYSTEMPORT Lite
* groups the producer and consumer indexes into the same 32-bit register,
* which we access using RDMA_CONS_INDEX
@@ -647,11 +683,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index = rdma_readl(priv, RDMA_CONS_INDEX);
p_index &= RDMA_PROD_INDEX_MASK;
- if (p_index < priv->rx_c_index)
- to_process = (RDMA_CONS_INDEX_MASK + 1) -
- priv->rx_c_index + p_index;
- else
- to_process = p_index - priv->rx_c_index;
+ to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
netif_dbg(priv, rx_status, ndev,
"p_index=%d rx_c_index=%d to_process=%d\n",
@@ -746,26 +778,26 @@ next:
return processed;
}
-static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
struct bcm_sysport_cb *cb,
unsigned int *bytes_compl,
unsigned int *pkts_compl)
{
+ struct bcm_sysport_priv *priv = ring->priv;
struct device *kdev = &priv->pdev->dev;
- struct net_device *ndev = priv->netdev;
if (cb->skb) {
- ndev->stats.tx_bytes += cb->skb->len;
+ ring->bytes += cb->skb->len;
*bytes_compl += cb->skb->len;
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len),
DMA_TO_DEVICE);
- ndev->stats.tx_packets++;
+ ring->packets++;
(*pkts_compl)++;
bcm_sysport_free_cb(cb);
/* SKB fragment */
} else if (dma_unmap_addr(cb, dma_addr)) {
- ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ ring->bytes += dma_unmap_len(cb, dma_len);
dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
dma_unmap_addr_set(cb, dma_addr, 0);
@@ -782,6 +814,13 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_cb *cb;
u32 hw_ind;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (!ring->priv->is_lite)
+ intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
+ else
+ intrl2_0_writel(ring->priv, BIT(ring->index +
+ INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
+
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -803,7 +842,7 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
while (last_tx_cn-- > 0) {
cb = ring->cbs + last_c_index;
- bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+ bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
last_c_index++;
@@ -1632,6 +1671,24 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ struct bcm_sysport_tx_ring *ring;
+ unsigned int q;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
+ tx_bytes += ring->bytes;
+ tx_packets += ring->packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ return &dev->stats;
+}
+
static void bcm_sysport_netif_start(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1893,6 +1950,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
+ .ndo_get_stats = bcm_sysport_get_nstats,
};
#define REV_FMT "v%2x.%02x"
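
Two things happen in the bcmsysport.c hunks above: per-TX-queue packet/byte counters are added to the ethtool statistics, and the RX producer/consumer distance is computed with a masked unsigned subtraction instead of an explicit wraparound branch. The masked form works because the indexes wrap at a power of two, so the subtraction is already modular. A standalone check of that identity; the mask value is illustrative, not the hardware's:

/* (prod - cons) & mask equals the in-flight count whether or not the
 * producer index has wrapped past the consumer index.
 */
#include <stdio.h>

#define RING_MASK 0xffffu /* indexes wrap at 65536 in this demo */

static unsigned int to_process(unsigned int prod, unsigned int cons)
{
	return (prod - cons) & RING_MASK;
}

int main(void)
{
	printf("%u\n", to_process(100, 90));   /* 10, no wrap  */
	printf("%u\n", to_process(5, 0xfffb)); /* 10, wrapped  */
	return 0;
}
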
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 863ddd7870b7..77a51c167a69 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -647,6 +647,9 @@ enum bcm_sysport_stat_type {
.reg_offset = ofs, \
}
+/* TX bytes and packets */
+#define NUM_SYSPORT_TXQ_STAT 2
+
struct bcm_sysport_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_sizeof;
@@ -690,6 +693,8 @@ struct bcm_sysport_tx_ring {
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
+ unsigned long packets; /* packets statistics */
+ unsigned long bytes; /* bytes statistics */
};
/* Driver private structure */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index d59cfcc4c4d5..6322594ab260 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_net.h>
#include "bgmac.h"
static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
@@ -114,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core)
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
- u8 *mac;
+ const u8 *mac = NULL;
int err;
bgmac = bgmac_alloc(&core->dev);
@@ -127,21 +128,27 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- switch (core->core_unit) {
- case 0:
- mac = sprom->et0mac;
- break;
- case 1:
- mac = sprom->et1mac;
- break;
- case 2:
- mac = sprom->et2mac;
- break;
- default:
- dev_err(bgmac->dev, "Unsupported core_unit %d\n",
- core->core_unit);
- err = -ENOTSUPP;
- goto err;
+ if (bgmac->dev->of_node)
+ mac = of_get_mac_address(bgmac->dev->of_node);
+
+ /* If no MAC address assigned via device tree, check SPROM */
+ if (!mac) {
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
+ dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+ core->core_unit);
+ err = -ENOTSUPP;
+ goto err;
+ }
}
ether_addr_copy(bgmac->net_dev->dev_addr, mac);
@@ -192,36 +199,50 @@ static int bgmac_probe(struct bcma_device *core)
goto err1;
}
- bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
- BGMAC_BFL_ENETROBO);
+ bgmac->has_robosw = !!(sprom->boardflags_lo & BGMAC_BFL_ENETROBO);
if (bgmac->has_robosw)
dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
- if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ if (sprom->boardflags_lo & BGMAC_BFL_ENETADM)
dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
/* Feature Flags */
- switch (core->bus->chipinfo.id) {
+ switch (ci->id) {
+ /* BCM 471X/535X family */
+ case BCMA_CHIP_ID_BCM4716:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ /* fallthrough */
+ case BCMA_CHIP_ID_BCM47162:
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ break;
case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM53572:
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) {
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+ ci->pkg == BCMA_PKG_ID_BCM47186) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
- case BCMA_CHIP_ID_BCM53572:
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ case BCMA_CHIP_ID_BCM53573:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (core->core_unit == 0) {
+ bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |=
+ BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
+ } else if (core->core_unit == 1) {
+ bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
+ bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
}
break;
case BCMA_CHIP_ID_BCM4749:
@@ -229,18 +250,11 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == 10) {
+ if (ci->pkg == 10) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
break;
- case BCMA_CHIP_ID_BCM4716:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- /* fallthrough */
- case BCMA_CHIP_ID_BCM47162:
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- break;
/* bcm4707_family */
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM47094:
@@ -249,21 +263,6 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
break;
- case BCMA_CHIP_ID_BCM53573:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
- if (core->core_unit == 0) {
- bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |=
- BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
- } else if (core->core_unit == 1) {
- bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
- bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
- }
- break;
default:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index da1b8b225eb9..73aca97a96bc 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -21,8 +21,12 @@
#include <linux/of_net.h>
#include "bgmac.h"
+#define NICPM_PADRING_CFG 0x00000004
#define NICPM_IOMUX_CTRL 0x00000008
+#define NICPM_PADRING_CFG_INIT_VAL 0x74000000
+#define NICPM_IOMUX_CTRL_INIT_VAL_AX 0x21880000
+
#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000
#define NICPM_IOMUX_CTRL_SPD_SHIFT 10
#define NICPM_IOMUX_CTRL_SPD_10M 0
@@ -113,6 +117,10 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
if (!bgmac->plat.nicpm_base)
return;
+ /* SET RGMII IO CONFIG */
+ writel(NICPM_PADRING_CFG_INIT_VAL,
+ bgmac->plat.nicpm_base + NICPM_PADRING_CFG);
+
val = NICPM_IOMUX_CTRL_INIT_VAL;
switch (bgmac->net_dev->phydev->speed) {
default:
@@ -244,6 +252,31 @@ static int bgmac_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int bgmac_suspend(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_suspend(bgmac);
+}
+
+static int bgmac_resume(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_resume(bgmac);
+}
+
+static const struct dev_pm_ops bgmac_pm_ops = {
+ .suspend = bgmac_suspend,
+ .resume = bgmac_resume
+};
+
+#define BGMAC_PM_OPS (&bgmac_pm_ops)
+#else
+#define BGMAC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct of_device_id bgmac_of_enet_match[] = {
{.compatible = "brcm,amac",},
{.compatible = "brcm,nsp-amac",},
@@ -257,6 +290,7 @@ static struct platform_driver bgmac_enet_driver = {
.driver = {
.name = "bgmac-enet",
.of_match_table = bgmac_of_enet_match,
+ .pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
.remove = bgmac_remove,
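
The bgmac-platform hunk above wires system sleep support in with the usual CONFIG_PM guard: the dev_pm_ops table exists only when power management is compiled in, and the driver registration compiles either way because the macro degrades to NULL. A skeleton of that idiom with placeholder names, not bgmac code:

/* Minimal shape of the CONFIG_PM guard used above; my_suspend/my_resume
 * and "my-driver" are placeholders.
 */
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
};
#define MY_PM_OPS (&my_pm_ops)
#else
#define MY_PM_OPS NULL /* no PM callbacks when CONFIG_PM is off */
#endif

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-driver",
		.pm = MY_PM_OPS,
	},
};

When only system-sleep callbacks are needed, SIMPLE_DEV_PM_OPS() can generate the same table under CONFIG_PM_SLEEP, which is the route the bnxt patch later in this diff takes.
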
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fd66fca00e01..ba4d2e145bb9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
@@ -1480,6 +1481,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
+ dev_set_drvdata(bgmac->dev, bgmac);
if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
@@ -1552,5 +1554,55 @@ void bgmac_enet_remove(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
+int bgmac_enet_suspend(struct bgmac *bgmac)
+{
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ phy_stop(bgmac->net_dev->phydev);
+
+ netif_stop_queue(bgmac->net_dev);
+
+ napi_disable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_detach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ bgmac_chip_intrs_off(bgmac);
+ bgmac_chip_reset(bgmac);
+ bgmac_dma_cleanup(bgmac);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
+
+int bgmac_enet_resume(struct bgmac *bgmac)
+{
+ int rc;
+
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ rc = bgmac_dma_init(bgmac);
+ if (rc)
+ return rc;
+
+ bgmac_chip_init(bgmac);
+
+ napi_enable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_attach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ netif_start_queue(bgmac->net_dev);
+
+ phy_start(bgmac->net_dev->phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_resume);
+
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 6d1c6ff1ed96..c1818766c501 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -402,7 +402,7 @@
#define BGMAC_WEIGHT 64
-#define ETHER_MAX_LEN 1518
+#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
@@ -537,6 +537,8 @@ int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
+int bgmac_enet_suspend(struct bgmac *bgmac);
+int bgmac_enet_resume(struct bgmac *bgmac);
struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9e8c06130c09..eccb3d1b6abb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2021,6 +2021,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
ETH_OVERHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
+ fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
@@ -4277,7 +4278,10 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return bnx2x_setup_tc(dev, tc->tc);
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
}
/* called with rtnl_lock */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b209b7f6093e..7dd83d0ef0a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6161,103 +6161,40 @@ static void bnx2x_link_int_ack(struct link_params *params,
}
}
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
{
- u8 *str_ptr = str;
- u32 mask = 0xf0000000;
- u8 shift = 8*4;
- u8 digit;
- u8 remove_leading_zeros = 1;
+ u16 ret;
+
if (*len < 10) {
/* Need more than 10 chars for this format */
- *str_ptr = '\0';
- (*len)--;
+ bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- while (shift > 0) {
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit == 0 && remove_leading_zeros) {
- *str_ptr = '0';
- } else {
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
-
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
- }
- mask = mask >> 4;
- if (shift == 4*4) {
- if (remove_leading_zeros) {
- str_ptr++;
- (*len)--;
- }
- *str_ptr = '.';
- str_ptr++;
- (*len)--;
- remove_leading_zeros = 1;
- }
- }
- if (remove_leading_zeros)
- (*len)--;
+ ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ *len -= ret;
return 0;
}
static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
- u8 *str_ptr = str;
- u32 mask = 0x00f00000;
- u8 shift = 8*3;
- u8 digit;
- u8 remove_leading_zeros = 1;
+ u16 ret;
if (*len < 10) {
/* Need more than 10 chars for this format */
- *str_ptr = '\0';
- (*len)--;
+ bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- while (shift > 0) {
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit == 0 && remove_leading_zeros) {
- *str_ptr = '0';
- } else {
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
-
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
- }
- mask = mask >> 4;
- if ((shift == 4*4) || (shift == 4*2)) {
- if (remove_leading_zeros) {
- str_ptr++;
- (*len)--;
- }
- *str_ptr = '.';
- str_ptr++;
- (*len)--;
- remove_leading_zeros = 1;
- }
- }
- if (remove_leading_zeros)
- (*len)--;
- return 0;
-}
-
-static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
-{
- str[0] = '\0';
- (*len)--;
+ ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ *len -= ret;
return 0;
}
@@ -10681,22 +10618,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- int status = 0;
u32 num;
num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
((raw_ver & 0xF000) >> 12);
- status = bnx2x_3_seq_format_ver(num, str, len);
- return status;
+ return bnx2x_3_seq_format_ver(num, str, len);
}
static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- int status = 0;
u32 spirom_ver;
+
spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
- status = bnx2x_format_ver(spirom_ver, str, len);
- return status;
+ return bnx2x_format_ver(spirom_ver, str, len);
}
static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
@@ -12547,13 +12481,12 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
{
- int status = 0;
phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
if (phy_index == INT_PHY)
return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
- status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+
+ return bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
port, phy);
- return status;
}
static void bnx2x_phy_def_cfg(struct link_params *params,
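
The bnx2x_link.c rewrite above collapses two roughly 40-line nibble-printing loops into scnprintf() calls; the %hx and %hhx length modifiers truncate the argument to 16 and 8 bits, so the fields of the version word come out as bare hex separated by dots. A userspace approximation using explicit masks instead of the length modifiers (the old loops had slightly different leading-zero quirks, so "roughly equivalent" is the honest claim):

#include <stdio.h>

int main(void)
{
	unsigned int num = 0x070a0b0c;
	char str[16];

	/* two-part format, e.g. "70a.b0c" */
	snprintf(str, sizeof(str), "%x.%x", num >> 16, num & 0xffff);
	printf("%s\n", str);

	/* three-part format from the low three bytes, e.g. "a.b.c" */
	snprintf(str, sizeof(str), "%x.%x.%x", (num >> 16) & 0xff,
		 (num >> 8) & 0xff, num & 0xff);
	printf("%s\n", str);
	return 0;
}
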
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1f1e54ba0ecb..b3ba66032980 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4482,9 +4482,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
- if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
- FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (BNXT_PF(bp)) {
+ u16 flags = le16_to_cpu(resp->flags);
+
+ if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
+ FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
+ }
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -4549,6 +4555,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -5198,9 +5207,10 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
if (BNXT_VF(bp))
- return bp->vf.max_irqs;
+ return min_t(unsigned int, bp->vf.max_irqs,
+ bp->vf.max_cp_rings);
#endif
- return bp->pf.max_irqs;
+ return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
}
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
@@ -5467,7 +5477,8 @@ static void bnxt_report_link(struct bnxt *bp)
if (bp->link_info.link_up) {
const char *duplex;
const char *flow_ctrl;
- u16 speed, fec;
+ u32 speed;
+ u16 fec;
netif_carrier_on(bp->dev);
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5483,7 +5494,7 @@ static void bnxt_report_link(struct bnxt *bp)
else
flow_ctrl = "none";
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
- netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
if (bp->flags & BNXT_FLAG_EEE_CAP)
netdev_info(bp->dev, "EEE is %s\n",
@@ -5857,6 +5868,76 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
return 0;
}
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
+{
+ struct hwrm_wol_filter_alloc_input req = {0};
+ struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+ req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+ memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->wol_filter_id = resp->wol_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
+{
+ struct hwrm_wol_filter_free_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+ req.wol_filter_id = bp->wol_filter_id;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
+static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
+{
+ struct hwrm_wol_filter_qcfg_input req = {0};
+ struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 next_handle = 0;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.handle = cpu_to_le16(handle);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ next_handle = le16_to_cpu(resp->next_handle);
+ if (next_handle != 0) {
+ if (resp->wol_type ==
+ WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
+ bp->wol = 1;
+ bp->wol_filter_id = resp->wol_filter_id;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return next_handle;
+}
+
+static void bnxt_get_wol_settings(struct bnxt *bp)
+{
+ u16 handle = 0;
+
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
+ return;
+
+ do {
+ handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
+ } while (handle && handle != 0xffff);
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6042,6 +6123,43 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return rc;
}
+/* rtnl_lock held, open the NIC halfway by allocating all resources, but
+ * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
+ * self-tests.
+ */
+int bnxt_half_open_nic(struct bnxt *bp)
+{
+ int rc = 0;
+
+ rc = bnxt_alloc_mem(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto half_open_err;
+ }
+ rc = bnxt_init_nic(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto half_open_err;
+ }
+ return 0;
+
+half_open_err:
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, false);
+ dev_close(bp->dev);
+ return rc;
+}
+
+/* rtnl_lock held, this call can only be made after a previous successful
+ * call to bnxt_half_open_nic().
+ */
+void bnxt_half_close_nic(struct bnxt *bp)
+{
+ bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, false);
+}
+
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -6923,7 +7041,9 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return bnxt_setup_mq_tc(dev, ntc->tc);
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
@@ -7224,6 +7344,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
@@ -7546,6 +7667,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -7591,6 +7713,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
rc = register_netdev(dev);
if (rc)
goto init_err_clr_int;
@@ -7614,6 +7742,88 @@ init_err_free:
return rc;
}
+static void bnxt_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp;
+
+ if (!dev)
+ return;
+
+ rtnl_lock();
+ bp = netdev_priv(dev);
+ if (!bp)
+ goto shutdown_exit;
+
+ if (netif_running(dev))
+ dev_close(dev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ bnxt_clear_int_mode(bp);
+ pci_wake_from_d3(pdev, bp->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+shutdown_exit:
+ rtnl_unlock();
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bnxt_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rtnl_lock();
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ rc = bnxt_close(dev);
+ }
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ rtnl_unlock();
+ return rc;
+}
+
+static int bnxt_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rtnl_lock();
+ if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ rc = -EBUSY;
+ goto resume_exit;
+ }
+ bnxt_get_wol_settings(bp);
+ if (netif_running(dev)) {
+ rc = bnxt_open(dev);
+ if (!rc)
+ netif_device_attach(dev);
+ }
+
+resume_exit:
+ rtnl_unlock();
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
+#define BNXT_PM_OPS (&bnxt_pm_ops)
+
+#else
+
+#define BNXT_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
/**
* bnxt_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -7730,6 +7940,8 @@ static struct pci_driver bnxt_pci_driver = {
.id_table = bnxt_pci_tbl,
.probe = bnxt_init_one,
.remove = bnxt_remove_one,
+ .shutdown = bnxt_shutdown,
+ .driver.pm = BNXT_PM_OPS,
.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
.sriov_configure = bnxt_sriov_configure,
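
bnxt_get_wol_settings() above walks the firmware's Wake-on-LAN filter list as a cursor: each HWRM query returns the next handle, and a handle of 0 or 0xffff ends the walk. Stripped of the firmware plumbing, the loop shape is just this; get_next() is a stand-in for the HWRM query, backed by a fake two-entry list:

#include <stdio.h>

static unsigned short get_next(unsigned short handle)
{
	/* pretend firmware list: 0 -> 1 -> 2 -> end */
	if (handle == 0)
		return 1;
	if (handle == 1)
		return 2;
	return 0xffff;
}

int main(void)
{
	unsigned short handle = 0;

	do {
		handle = get_next(handle);
		printf("visited handle 0x%x\n", handle);
	} while (handle && handle != 0xffff);
	return 0;
}
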
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c7a5b84a5cb2..3ef42dbc6327 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,6 +18,8 @@
#define DRV_VER_MIN 7
#define DRV_VER_UPD 0
+#include <linux/interrupt.h>
+
struct tx_bd {
__le32 tx_bd_len_flags_type;
#define TX_BD_TYPE (0x3f << 0)
@@ -424,8 +426,6 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_MIN_PKT_SIZE 52
-#define BNXT_NUM_TESTS(bp) 0
-
#define BNXT_DEFAULT_RX_RING_SIZE 511
#define BNXT_DEFAULT_TX_RING_SIZE 511
@@ -851,6 +851,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
u16 support_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
@@ -862,6 +863,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
u16 support_auto_speeds;
u16 lp_auto_link_speeds;
u16 force_link_speed;
@@ -909,6 +911,14 @@ struct bnxt_led_info {
__le16 led_color_caps;
};
+#define BNXT_MAX_TEST 8
+
+struct bnxt_test_info {
+ u8 offline_mask;
+ u16 timeout;
+ char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
+};
+
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
@@ -987,6 +997,7 @@ struct bnxt {
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_NEW_RSS_CAP 0x2000
+ #define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
@@ -994,6 +1005,7 @@ struct bnxt {
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
+ #define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1003,7 +1015,8 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
-#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
@@ -1178,6 +1191,12 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+ u8 num_tests;
+ struct bnxt_test_info *test_info;
+
+ u8 wol_filter_id;
+ u8 wol;
+
u8 num_leds;
struct bnxt_led_info leds[BNXT_MAX_LED];
@@ -1236,8 +1255,12 @@ void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_half_open_nic(struct bnxt *bp);
+void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 03532061d211..46de2f8ff024 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"
@@ -241,6 +243,92 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
return 0;
}
+static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_fw_set_structured_data_input set = {0};
+ struct hwrm_fw_get_structured_data_input get = {0};
+ struct hwrm_struct_data_dcbx_app *fw_app;
+ struct hwrm_struct_hdr *data;
+ dma_addr_t mapping;
+ size_t data_len;
+ int rc, n, i;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ n = IEEE_8021QAZ_MAX_TCS;
+ data_len = sizeof(*data) + sizeof(*fw_app) * n;
+ data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(data, 0, data_len);
+ bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
+ get.dest_data_addr = cpu_to_le64(mapping);
+ get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+ get.count = 0;
+ rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto set_app_exit;
+
+ fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
+
+ if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
+ rc = -ENODEV;
+ goto set_app_exit;
+ }
+
+ n = data->count;
+ for (i = 0; i < n; i++, fw_app++) {
+ if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
+ fw_app->protocol_selector == app->selector &&
+ fw_app->priority == app->priority) {
+ if (add)
+ goto set_app_exit;
+ else
+ break;
+ }
+ }
+ if (add) {
+ /* append */
+ n++;
+ fw_app->protocol_id = cpu_to_be16(app->protocol);
+ fw_app->protocol_selector = app->selector;
+ fw_app->priority = app->priority;
+ fw_app->valid = 1;
+ } else {
+ size_t len = 0;
+
+ /* not found, nothing to delete */
+ if (n == i)
+ goto set_app_exit;
+
+ len = (n - 1 - i) * sizeof(*fw_app);
+ if (len)
+ memmove(fw_app, fw_app + 1, len);
+ n--;
+ memset(fw_app + n, 0, sizeof(*fw_app));
+ }
+ data->count = n;
+ data->len = cpu_to_le16(sizeof(*fw_app) * n);
+ data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
+ set.src_data_addr = cpu_to_le64(mapping);
+ set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
+ set.hdr_cnt = 1;
+ rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+
+set_app_exit:
+ dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
+ return rc;
+}
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
@@ -417,6 +505,15 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
return -EINVAL;
rc = dcb_ieee_setapp(dev, app);
+ if (rc)
+ return rc;
+
+ if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_IBOE) ||
+ (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+
return rc;
}
@@ -425,10 +522,19 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
struct bnxt *bp = netdev_priv(dev);
int rc;
- if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
rc = dcb_ieee_delapp(dev, app);
+ if (rc)
+ return rc;
+ if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_IBOE) ||
+ (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+
return rc;
}
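
The delete path in bnxt_hwrm_set_dcbx_app() above removes entry i from an n-element array by sliding the tail down with memmove() and zeroing the vacated slot, the standard in-place array deletion. The idiom in isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	int app[5] = { 10, 20, 30, 40, 50 };
	int n = 5, i = 2; /* delete the entry holding 30 */

	memmove(&app[i], &app[i + 1], (n - 1 - i) * sizeof(app[0]));
	n--;
	memset(&app[n], 0, sizeof(app[0]));

	for (i = 0; i < n; i++)
		printf("%d ", app[i]); /* 10 20 40 50 */
	printf("\n");
	return 0;
}
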
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 35a0d28cf2fd..ecd0a5e46a49 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6903a873f072..11ddf0adc6e1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,6 +18,7 @@
#include <linux/firmware.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
@@ -209,6 +211,10 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
return num_stats;
}
+ case ETH_SS_TEST:
+ if (!bp->num_tests)
+ return -EOPNOTSUPP;
+ return bp->num_tests;
default:
return -EOPNOTSUPP;
}
@@ -306,6 +312,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
break;
+ case ETH_SS_TEST:
+ if (bp->num_tests)
+ memcpy(buf, bp->test_info->string,
+ bp->num_tests * ETH_GSTRING_LEN);
+ break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
stringset);
@@ -824,7 +835,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
- info->testinfo_len = BNXT_NUM_TESTS(bp);
+ info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
@@ -832,6 +843,45 @@ static void bnxt_get_drvinfo(struct net_device *dev,
kfree(pkglog);
}
+static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+ if (bp->flags & BNXT_FLAG_WOL_CAP) {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ }
+}
+
+static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (!(bp->flags & BNXT_FLAG_WOL_CAP))
+ return -EINVAL;
+ if (!bp->wol) {
+ if (bnxt_hwrm_alloc_wol_fltr(bp))
+ return -EBUSY;
+ bp->wol = 1;
+ }
+ } else {
+ if (bp->wol) {
+ if (bnxt_hwrm_free_wol_fltr(bp))
+ return -EBUSY;
+ bp->wol = 0;
+ }
+ }
+ return 0;
+}
+
u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
@@ -879,6 +929,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
50000baseCR2_Full);\
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100000baseCR4_Full);\
if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
Pause); \
@@ -915,6 +968,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
50000baseCR2_Full)) \
(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
}
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
@@ -977,6 +1033,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
return SPEED_50000;
+ case BNXT_LINK_SPEED_100GB:
+ return SPEED_100000;
default:
return SPEED_UNKNOWN;
}
@@ -1042,7 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
@@ -1082,6 +1140,10 @@ static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
break;
+ case SPEED_100000:
+ if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+ break;
default:
netdev_err(dev, "unsupported speed!\n");
break;
@@ -2128,12 +2190,372 @@ static int bnxt_set_phys_id(struct net_device *dev,
return rc;
}
+static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
+{
+ struct hwrm_selftest_irq_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_test_irq(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
+ int rc;
+
+ rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
+{
+ struct hwrm_port_mac_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+
+ req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+ if (enable)
+ req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+ else
+ req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 fw_advertising = link_info->advertising;
+ u16 fw_speed;
+ int rc;
+
+ if (!link_info->autoneg)
+ return 0;
+
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
+ if (netif_carrier_ok(bp->dev))
+ fw_speed = bp->link_info.link_speed;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+
+ req->force_link_speed = cpu_to_le16(fw_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
+ PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+ rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+ req->flags = 0;
+ req->force_link_speed = cpu_to_le16(0);
+ return rc;
+}
+
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+
+ if (enable) {
+ bnxt_disable_an_for_lpbk(bp, &req);
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ } else {
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+ }
+ req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u32 raw_cons, int pkt_size)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rx_cmp *rxcmp;
+ u16 cp_cons, cons;
+ u8 *data;
+ u32 len;
+ int i;
+
+ cp_cons = RING_CMP(raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data_ptr;
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ if (len != pkt_size)
+ return -EIO;
+ i = ETH_ALEN;
+ if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
+ return -EIO;
+ i += ETH_ALEN;
+ for ( ; i < pkt_size; i++) {
+ if (data[i] != (u8)(i & 0xff))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct tx_cmp *txcmp;
+ int rc = -EIO;
+ u32 raw_cons;
+ u32 cons;
+ int i;
+
+ cpr = &bnapi->cp_ring;
+ raw_cons = cpr->cp_raw_cons;
+ for (i = 0; i < 200; i++) {
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons)) {
+ udelay(5);
+ continue;
+ }
+
+ /* The validity of the entry must be checked before
+ * reading any further.
+ */
+ dma_rmb();
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+ cpr->cp_raw_cons = raw_cons;
+ return rc;
+}
+
+static int bnxt_run_loopback(struct bnxt *bp)
+{
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ int pkt_size, i = 0;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ u8 *data;
+ int rc;
+
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
+ if (!skb)
+ return -ENOMEM;
+ data = skb_put(skb, pkt_size);
+ eth_broadcast_addr(data);
+ i += ETH_ALEN;
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
+ i += ETH_ALEN;
+ for ( ; i < pkt_size; i++)
+ data[i] = (u8)(i & 0xff);
+
+ map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, map)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ rc = bnxt_poll_loopback(bp, pkt_size);
+
+ dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ return rc;
+}
+
+static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
+{
+ struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_selftest_exec_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ resp->test_success = 0;
+ req.flags = test_mask;
+ rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+ *test_results = resp->test_success;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+#define BNXT_DRV_TESTS 3
+#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
+#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+
+static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ u64 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ bool offline = false;
+ u8 test_results = 0;
+ u8 test_mask = 0;
+ int rc, i;
+
+ if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
+ if (!netif_running(dev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (bp->pf.active_vfs) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+ return;
+ }
+ offline = true;
+ }
+
+ for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+ u8 bit_val = 1 << i;
+
+ if (!(bp->test_info->offline_mask & bit_val))
+ test_mask |= bit_val;
+ else if (offline)
+ test_mask |= bit_val;
+ }
+ if (!offline) {
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+ } else {
+ rc = bnxt_close_nic(bp, false, false);
+ if (rc)
+ return;
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
+ rc = bnxt_half_open_nic(bp);
+ if (rc) {
+ bnxt_hwrm_mac_loopback(bp, false);
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+ if (bnxt_run_loopback(bp))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ else
+ buf[BNXT_MACLPBK_TEST_IDX] = 0;
+
+ bnxt_hwrm_mac_loopback(bp, false);
+ bnxt_hwrm_phy_loopback(bp, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ bnxt_hwrm_phy_loopback(bp, false);
+ bnxt_half_close_nic(bp);
+ bnxt_open_nic(bp, false, true);
+ }
+ if (bnxt_test_irq(bp)) {
+ buf[BNXT_IRQ_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+ u8 bit_val = 1 << i;
+
+ if ((test_mask & bit_val) && !(test_results & bit_val)) {
+ buf[i] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+}
+
+void bnxt_ethtool_init(struct bnxt *bp)
+{
+ struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_selftest_qlist_input req = {0};
+ struct bnxt_test_info *test_info;
+ int i, rc;
+
+ if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ return;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto ethtool_init_exit;
+
+ test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+ if (!test_info)
+ goto ethtool_init_exit;
+
+ bp->test_info = test_info;
+ bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
+ if (bp->num_tests > BNXT_MAX_TEST)
+ bp->num_tests = BNXT_MAX_TEST;
+
+ test_info->offline_mask = resp->offline_tests;
+ test_info->timeout = le16_to_cpu(resp->test_timeout);
+ if (!test_info->timeout)
+ test_info->timeout = HWRM_CMD_TIMEOUT;
+ for (i = 0; i < bp->num_tests; i++) {
+ char *str = test_info->string[i];
+ char *fw_str = resp->test0_name + i * 32;
+
+ if (i == BNXT_MACLPBK_TEST_IDX) {
+ strcpy(str, "Mac loopback test (offline)");
+ } else if (i == BNXT_PHYLPBK_TEST_IDX) {
+ strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_IRQ_TEST_IDX) {
+ strcpy(str, "Interrupt_test (offline)");
+ } else {
+ strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+ if (test_info->offline_mask & (1 << i))
+ strncat(str, " (offline)",
+ ETH_GSTRING_LEN - strlen(str));
+ else
+ strncat(str, " (online)",
+ ETH_GSTRING_LEN - strlen(str));
+ }
+ }
+
+ethtool_init_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+void bnxt_ethtool_free(struct bnxt *bp)
+{
+ kfree(bp->test_info);
+ bp->test_info = NULL;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_wol = bnxt_get_wol,
+ .set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
.set_coalesce = bnxt_set_coalesce,
.get_msglevel = bnxt_get_msglevel,
@@ -2161,4 +2583,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_module_eeprom = bnxt_get_module_eeprom,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
+ .self_test = bnxt_self_test,
};
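
The loopback self-test added above transmits one frame built as six bytes of broadcast destination, the device's own MAC as source, then an incrementing byte pattern; the receive side verifies the source MAC and the pattern. A standalone fill-and-verify demo of that payload shape (constants and names are illustrative):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static int verify(const unsigned char *data, int pkt_size,
		  const unsigned char *mac)
{
	int i = ETH_ALEN;

	if (memcmp(data + i, mac, ETH_ALEN) != 0)
		return -1; /* source MAC mismatch */
	for (i += ETH_ALEN; i < pkt_size; i++)
		if (data[i] != (unsigned char)(i & 0xff))
			return -1; /* pattern corrupted */
	return 0;
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };
	unsigned char pkt[64];
	int i;

	memset(pkt, 0xff, ETH_ALEN);           /* broadcast destination */
	memcpy(pkt + ETH_ALEN, mac, ETH_ALEN); /* our own source MAC */
	for (i = 2 * ETH_ALEN; i < (int)sizeof(pkt); i++)
		pkt[i] = (unsigned char)(i & 0xff);

	printf("loopback payload %s\n",
	       verify(pkt, (int)sizeof(pkt), mac) ? "corrupt" : "intact");
	return 0;
}
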
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ed1e555292e9..f1bc90b6fb5b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,5 +39,7 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+void bnxt_ethtool_init(struct bnxt *bp);
+void bnxt_ethtool_free(struct bnxt *bp);
#endif
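
The two entry points exported here pair with probe and teardown; presumably (the call sites are in bnxt.c, outside this hunk) the firmware test list is queried once at init and the cached info freed on removal. A sketch of the wiring, with my_probe()/my_remove() as hypothetical stand-ins for the real call sites:

static int my_probe(struct bnxt *bp)
{
	bnxt_ethtool_init(bp);	/* query and cache the firmware self-test list */
	return 0;
}

static void my_remove(struct bnxt *bp)
{
	bnxt_ethtool_free(bp);	/* release cached bp->test_info */
}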
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 6e275c23d68b..7dc71bb95837 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -11,19 +11,21 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.7.0 */
+/* HSI and HWRM Specification 1.7.6 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 7
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_UPDATE 6
-#define HWRM_VERSION_STR "1.7.0"
+#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR "1.7.6.2"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it to the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((__le32)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
@@ -571,9 +573,10 @@ struct hwrm_ver_get_output {
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
+ u8 init_pending;
+ #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
u8 unused_0;
u8 unused_1;
- u8 unused_2;
u8 valid;
};
@@ -809,6 +812,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -827,10 +832,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 unused_0;
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
- u8 unused_1;
- u8 unused_2;
+ u8 host_cnt;
+ #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL
+ u8 unused_0;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -867,12 +874,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_3;
+ u8 unused_1;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
- u8 unused_4;
+ u8 unused_2;
u8 valid;
};
@@ -888,16 +895,13 @@ struct hwrm_func_cfg_input {
u8 unused_0;
u8 unused_1;
__le32 flags;
- #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
- #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
- #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
- #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1013,7 +1017,7 @@ struct hwrm_func_qstats_output {
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
+ __le64 tx_discard_pkts;
__le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
@@ -1021,7 +1025,7 @@ struct hwrm_func_qstats_output {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
+ __le64 rx_discard_pkts;
__le64 rx_drop_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
@@ -4743,25 +4747,72 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
+/* hwrm_wol_filter_alloc */
+/* Input (64 bytes) */
+struct hwrm_wol_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
+ __le32 unused_1;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
__le32 unused_2;
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
};
/* Output (16 bytes) */
-struct hwrm_nvm_read_output {
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free */
+/* Input (32 bytes) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_filter_free_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -4773,21 +4824,107 @@ struct hwrm_nvm_read_output {
u8 valid;
};
-/* hwrm_nvm_raw_dump */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_dump_input {
+/* hwrm_wol_filter_qcfg */
+/* Input (56 bytes) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ __le32 unused_0;
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3[3];
+ u8 unused_4;
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ __le16 unused_5[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg */
+/* Input (40 bytes) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ __le16 unused_4[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ u8 wol_pkt_len;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
__le32 offset;
__le32 len;
+ __le32 unused_2;
};
/* Output (16 bytes) */
-struct hwrm_nvm_raw_dump_output {
+struct hwrm_nvm_read_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -4881,6 +5018,15 @@ struct hwrm_nvm_write_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ u8 unused_0[7];
+};
+
/* hwrm_nvm_modify */
/* Input (40 bytes) */
struct hwrm_nvm_modify_input {
@@ -5112,6 +5258,100 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};
+/* hwrm_selftest_qlist */
+/* Input (16 bytes) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (248 bytes) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+};
+
+/* hwrm_selftest_exec */
+/* Input (24 bytes) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ __le16 unused_0[3];
+};
+
+/* hwrm_selftest_irq */
+/* Input (16 bytes) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
/* Hardware Resource Manager Specification */
/* Input (16 bytes) */
struct input {
@@ -5130,6 +5370,16 @@ struct output {
__le16 resp_len;
};
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
+
/* Command numbering (8 bytes) */
struct cmd_nums {
__le16 req_type;
@@ -5252,11 +5502,15 @@ struct cmd_nums {
#define HWRM_CFA_FLOW_FLUSH (0x105UL)
#define HWRM_CFA_FLOW_STATS (0x106UL)
#define HWRM_CFA_FLOW_INFO (0x107UL)
+ #define HWRM_SELFTEST_QLIST (0x200UL)
+ #define HWRM_SELFTEST_EXEC (0x201UL)
+ #define HWRM_SELFTEST_IRQ (0x202UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL)
#define HWRM_NVM_GET_VARIABLE (0xfff1UL)
@@ -5464,6 +5718,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
__le16 len;
u8 version;
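
In the new selftest messages above, the request carries a bitmap of tests in flags and the response reports per-test pass bits in test_success, qualified by requested_tests. A sketch of how a caller might interpret the result, mirroring the qlist query earlier in this patch (error handling elided; my_failed_tests() is a hypothetical helper):

static u8 my_failed_tests(struct bnxt *bp, u8 test_mask)
{
	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_exec_input req = {0};
	u8 failed = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
	req.flags = test_mask;
	mutex_lock(&bp->hwrm_cmd_lock);
	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		failed = resp->requested_tests & ~resp->test_success;	/* requested but not passed */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return failed;
}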
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0b8cd7443843..b8e7248294d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -84,6 +85,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
u32 func_flags;
int rc;
+ if (bp->hwrm_spec_code < 0x10701)
+ return -ENOTSUPP;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -96,9 +100,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
/* TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
@@ -134,8 +138,11 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
- ivi->qos = vf->flags & BNXT_VF_QOS;
- ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (vf->flags & BNXT_VF_QOS)
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
+ else
+ ivi->qos = 0;
+ ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->flags & BNXT_VF_LINK_UP)
@@ -300,7 +307,6 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
for (i = 0; i < num_vfs; i++) {
vf = &bp->pf.vf[i];
memset(vf, 0, sizeof(*vf));
- vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
}
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 1ab72e4820af..dbc8d977fc5a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
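
The spoofchk hunk above reflects an API change in HWRM 1.7.6: the old single SRC_MAC_ADDR_CHECK bit is replaced by an explicit ENABLE/DISABLE pair, so the driver now ORs in exactly one of the two per request instead of clearing a bit. A one-line sketch of the mapping (my_spoofchk_flags() is hypothetical):

static u32 my_spoofchk_flags(u32 func_flags, bool setting)
{
	return func_flags | (setting ?
		FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE :
		FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
}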
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 899c30fb5188..9dae32756767 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -19,11 +19,10 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
-static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod)
{
struct bnxt_sw_tx_bd *tx_buf;
- struct tx_bd_ext *txbd1;
struct tx_bd *txbd;
u32 flags;
u16 prod;
@@ -33,23 +32,13 @@ static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
tx_buf->rx_prod = rx_prod;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+ flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
prod = NEXT_TX(prod);
- txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-
- txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
- txbd1->tx_bd_mss = cpu_to_le32(0);
- txbd1->tx_bd_cfa_action = cpu_to_le32(0);
- txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
-
- prod = NEXT_TX(prod);
txr->tx_prod = prod;
}
@@ -66,7 +55,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
last_tx_cons = tx_cons;
tx_cons = NEXT_TX(tx_cons);
- tx_cons = NEXT_TX(tx_cons);
}
txr->tx_cons = tx_cons;
if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
@@ -133,7 +121,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
case XDP_TX:
- if (tx_avail < 2) {
+ if (tx_avail < 1) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
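
The XDP changes above drop the extended BD, so each XDP_TX packet now occupies a single descriptor: the BD count field becomes 1, completion advances the consumer once per packet, and the availability check needs only one free slot. A toy model of the resulting accounting, assuming a power-of-two ring size (struct my_ring is a stand-in, not the driver's ring type):

struct my_ring { u16 prod, cons, size; };

static u16 my_tx_avail(const struct my_ring *r)
{
	/* one slot per packet; masked subtraction handles wraparound */
	return r->size - ((r->prod - r->cons) & (r->size - 1));
}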
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index b529f2c5355b..12a5ad66b564 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,6 +10,8 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 365895ed3c3e..a205a9ff9e17 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -621,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* GENET TDMA hardware does not support a configurable timeout, but will
* always generate an interrupt either after MBDONE packets have been
- * transmitted, or when the ring is emtpy.
+ * transmitted, or when the ring is empty.
*/
if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -707,6 +707,19 @@ struct bcmgenet_stats {
.reg_offset = offset, \
}
+#define STAT_GENET_Q(num) \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
+ tx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
+ tx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
+ rx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
+ rx_rings[num].errors), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].dropped)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -801,6 +814,12 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ /* Per TX queues */
+ STAT_GENET_Q(0),
+ STAT_GENET_Q(1),
+ STAT_GENET_Q(2),
+ STAT_GENET_Q(3),
+ STAT_GENET_Q(16),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1078,8 +1097,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= (EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv))
+ reg |= EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR;
+ else
+ reg |= EXT_PWR_DOWN_PHY;
+
+ reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
bcmgenet_phy_power_set(priv->dev, false);
@@ -1104,12 +1132,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_BIAS);
- /* fallthrough */
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv)) {
+ reg &= ~(EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR);
+ reg |= EXT_PHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ mdelay(1);
+
+ reg &= ~EXT_PHY_RESET;
+ } else {
+ reg &= ~EXT_PWR_DOWN_PHY;
+ reg |= EXT_PWR_DN_EN_LD;
+ }
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_phy_power_set(priv->dev, true);
+ bcmgenet_mii_reset(priv->dev);
+ break;
+
case GENET_POWER_CABLE_SENSE:
/* enable APD */
- reg |= EXT_PWR_DN_EN_LD;
+ if (!GENET_IS_V5(priv)) {
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
break;
case GENET_POWER_WOL_MAGIC:
bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1117,39 +1167,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
default:
break;
}
-
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- if (mode == GENET_POWER_PASSIVE) {
- bcmgenet_phy_power_set(priv->dev, true);
- bcmgenet_mii_reset(priv->dev);
- }
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int val = 0;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (!priv->phydev)
- val = -ENODEV;
- else
- val = phy_mii_ioctl(priv->phydev, rq, cmd);
- break;
-
- default:
- val = -EINVAL;
- break;
- }
+ if (!priv->phydev)
+ return -ENODEV;
- return val;
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1240,14 +1271,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
- /* Compute how many buffers are transmitted since last xmit call */
- c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
- c_index &= DMA_C_INDEX_MASK;
-
- if (likely(c_index >= ring->c_index))
- txbds_ready = c_index - ring->c_index;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX)
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_CLEAR);
else
- txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_CLEAR);
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+ & DMA_C_INDEX_MASK;
+ txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, tx_done, dev,
"%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1280,15 +1315,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
}
ring->free_bds += txbds_processed;
- ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ ring->c_index = c_index;
- dev->stats.tx_packets += pkts_compl;
- dev->stats.tx_bytes += bytes_compl;
+ ring->packets += pkts_compl;
+ ring->bytes += bytes_compl;
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
pkts_compl, bytes_compl);
- return pkts_compl;
+ return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1657,18 +1692,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned long dma_flag;
int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
- unsigned int p_index;
+ unsigned int p_index, mask;
unsigned int discards;
unsigned int chksum_ok = 0;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX) {
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+ INTRL2_CPU_CLEAR);
+ } else {
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv,
+ mask,
+ INTRL2_CPU_CLEAR);
+ }
+
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- dev->stats.rx_missed_errors += discards;
- dev->stats.rx_errors += discards;
+ ring->errors += discards;
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1680,12 +1725,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
}
p_index &= DMA_P_INDEX_MASK;
-
- if (likely(p_index >= ring->c_index))
- rxpkttoprocess = p_index - ring->c_index;
- else
- rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
- p_index;
+ rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, rx_status, dev,
"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1696,7 +1736,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
@@ -1724,7 +1764,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_errors++;
+ ring->errors++;
dev_kfree_skb_any(skb);
goto next;
}
@@ -1773,8 +1813,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ ring->packets++;
+ ring->bytes += len;
if (dma_flag & DMA_RX_MULT)
dev->stats.multicast++;
@@ -1912,10 +1952,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
/* Mask all interrupts.*/
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1942,8 +1980,6 @@ static int init_umac(struct bcmgenet_priv *priv)
int ret;
u32 reg;
u32 int0_enable = 0;
- u32 int1_enable = 0;
- int i;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1970,12 +2006,6 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- /* Enable Rx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
- /* Enable Tx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
/* Configure backpressure vectors for MoCA */
if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
@@ -1993,18 +2023,8 @@ static int init_umac(struct bcmgenet_priv *priv)
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
- /* Enable Rx priority queue interrupts */
- for (i = 0; i < priv->hw_params->rx_queues; ++i)
- int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
- /* Enable Tx priority queue interrupts */
- for (i = 0; i < priv->hw_params->tx_queues; ++i)
- int1_enable |= (1 << i);
-
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- /* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
return 0;
@@ -2136,22 +2156,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_tx_ring *ring;
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << i);
}
ring = &priv->tx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_disable = 0xffff;
struct bcmgenet_tx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
@@ -2264,22 +2295,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_rx_ring *ring;
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
}
ring = &priv->rx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
struct bcmgenet_rx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
@@ -2634,6 +2676,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM)) {
+ /* all other interrupts of interest are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
@@ -2921,7 +2972,7 @@ static int bcmgenet_close(struct net_device *dev)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
@@ -3101,6 +3152,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ unsigned long rx_bytes = 0, rx_packets = 0;
+ unsigned long rx_errors = 0, rx_dropped = 0;
+ struct bcmgenet_tx_ring *tx_ring;
+ struct bcmgenet_rx_ring *rx_ring;
+ unsigned int q;
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ tx_ring = &priv->tx_rings[q];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+ }
+ tx_ring = &priv->tx_rings[DESC_INDEX];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+
+ for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ rx_ring = &priv->rx_rings[q];
+
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+ }
+ rx_ring = &priv->rx_rings[DESC_INDEX];
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_errors = rx_errors;
+ dev->stats.rx_missed_errors = rx_errors;
+ return &dev->stats;
+}
+
static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_open = bcmgenet_open,
.ndo_stop = bcmgenet_close,
@@ -3113,6 +3206,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
#endif
+ .ndo_get_stats = bcmgenet_get_stats,
};
/* Array of GENET hardware parameters/characteristics */
@@ -3186,6 +3280,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
},
+ [GENET_V5] = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+ },
};
/* Infer hardware parameters from the detected GENET version */
@@ -3196,26 +3309,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
u8 major;
u16 gphy_rev;
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V4;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V3;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V2;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V1;
}
/* enum genet_version starts at 1 */
@@ -3225,7 +3334,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 5)
+ if (major == 6)
+ major = 5;
+ else if (major == 5)
major = 4;
else if (major == 0)
major = 1;
@@ -3253,19 +3364,25 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ if (GENET_IS_V5(priv)) {
+ /* The EPHY revision should come from the MDIO registers of
+ * the PHY not from GENET.
+ */
+ if (gphy_rev != 0) {
+ pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+ gphy_rev);
+ }
/* This is reserved so should require special treatment */
- if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
return;
- }
-
/* This is the good old scheme, just GPHY major, no minor nor patch */
- if ((gphy_rev & 0xf0) != 0)
+ } else if ((gphy_rev & 0xf0) != 0) {
priv->gphy_rev = gphy_rev << 8;
-
/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
- else if ((gphy_rev & 0xff00) != 0)
+ } else if ((gphy_rev & 0xff00) != 0) {
priv->gphy_rev = gphy_rev;
+ }
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
@@ -3295,6 +3412,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3493,7 +3611,7 @@ static int bcmgenet_suspend(struct device *d)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
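
Both ring-index hunks above replace the old if/else wraparound handling with a single masked subtraction, which is correct for any unsigned index space as long as the mask covers it. Standalone, with the usual 16-bit mask (DMA_C_INDEX_MASK is 0xffff here):

/* old = 0xfffe, new = 0x0003  ->  (0x0003 - 0xfffe) & 0xffff == 5 */
static unsigned int my_ring_delta(unsigned int new, unsigned int old)
{
	return (new - old) & 0xffff;
}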
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289d65ae..efd07020b89f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -355,8 +355,14 @@ struct bcmgenet_mib_counters {
#define EXT_PWR_DN_EN_LD (1 << 3)
#define EXT_ENERGY_DET (1 << 4)
#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_IDDQ_GLBL_PWR (1 << 7)
#define EXT_PHY_RESET (1 << 8)
#define EXT_ENERGY_DET_MASK (1 << 12)
+#define EXT_PWR_DOWN_PHY_TX (1 << 16)
+#define EXT_PWR_DOWN_PHY_RX (1 << 17)
+#define EXT_PWR_DOWN_PHY_SD (1 << 18)
+#define EXT_PWR_DOWN_PHY_RD (1 << 19)
+#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_LINK (1 << 4)
@@ -499,13 +505,15 @@ enum bcmgenet_version {
GENET_V1 = 1,
GENET_V2,
GENET_V3,
- GENET_V4
+ GENET_V4,
+ GENET_V5
};
#define GENET_IS_V1(p) ((p)->version == GENET_V1)
#define GENET_IS_V2(p) ((p)->version == GENET_V2)
#define GENET_IS_V3(p) ((p)->version == GENET_V3)
#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+#define GENET_IS_V5(p) ((p)->version == GENET_V5)
/* Hardware flags */
#define GENET_HAS_40BITS (1 << 0)
@@ -544,6 +552,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
+ unsigned long packets;
+ unsigned long bytes;
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -562,6 +572,10 @@ struct bcmgenet_tx_ring {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
+ unsigned long bytes;
+ unsigned long packets;
+ unsigned long errors;
+ unsigned long dropped;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
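
The per-ring packets/bytes/errors/dropped counters added to these structs are what STAT_GENET_Q() exposes through ethtool and what bcmgenet_get_stats() sums for the netdev totals. The stat names themselves come from preprocessor stringification; a self-contained illustration of that two-step expansion (plain C, not driver code):

#include <stdio.h>

#define STR_(x) #x
#define STR(x) STR_(x)	/* same two-step trick as the kernel's __stringify() */

int main(void)
{
	puts("txq" STR(1) "_packets");	/* prints: txq1_packets */
	return 0;
}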
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b97122926d3a..2fbd027f0148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- u32 cpu_mask_clear;
int retries = 0;
u32 reg;
@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- /* Enable the MPD interrupt */
- cpu_mask_clear = UMAC_IRQ_MPD_R;
-
- bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
-
return 0;
}
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
- u32 cpu_mask_set;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
priv->crc_fwd_en = 0;
-
- /* Stop monitoring magic packet IRQ */
- cpu_mask_set = UMAC_IRQ_MPD_R;
-
- /* Stop monitoring magic packet IRQ */
- bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 2f9281936f0e..285676f8da6b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -195,39 +195,43 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (!GENET_IS_V4(priv))
- return;
-
- reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
- if (enable) {
- reg &= ~EXT_CK25_DIS;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
- reg |= EXT_GPHY_RESET;
+ if (GENET_IS_V4(priv)) {
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ if (enable) {
+ reg &= ~EXT_CK25_DIS;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~EXT_GPHY_RESET;
+ } else {
+ reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+ EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+ reg |= EXT_CK25_DIS;
+ }
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~EXT_GPHY_RESET;
+ udelay(60);
} else {
- reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
- reg |= EXT_CK25_DIS;
}
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- udelay(60);
}
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ if (!GENET_IS_V5(priv)) {
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ }
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->phydev,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 30d1eb9ebec9..f395b951f5e7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
return timeout_us ? 0 : -EBUSY;
}
+#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
u32 i, apedata;
@@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
return 0;
}
+#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
@@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
int i;
@@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp)
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
}
}
+#else
+static inline void tg3_hwmon_close(struct tg3 *tp) { }
+static inline void tg3_hwmon_open(struct tg3 *tp) { }
+#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
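
The tg3 hunks above show the usual pattern for Kconfig-optional support: the real hwmon routines compile only under CONFIG_TIGON3_HWMON, while empty static inline stubs in the #else branch keep every call site free of its own #ifdefs. The shape, with hypothetical names:

struct my_dev;

#ifdef CONFIG_MY_FEATURE
void my_feature_open(struct my_dev *d);
void my_feature_close(struct my_dev *d);
#else
static inline void my_feature_open(struct my_dev *d) { }
static inline void my_feature_close(struct my_dev *d) { }
#endif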
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 30606b11b128..91f7492623d3 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -432,15 +432,17 @@ static int macb_mii_probe(struct net_device *dev)
}
pdata = dev_get_platdata(&bp->pdev->dev);
- if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
- "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ if (pdata) {
+ if (gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev,
+ pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ } else {
+ phydev->irq = PHY_POLL;
}
- } else {
- phydev->irq = PHY_POLL;
}
/* attach the mac to the phy */
@@ -684,8 +686,8 @@ static void macb_tx_error_task(struct work_struct *work)
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
- bp->stats.tx_packets++;
- bp->stats.tx_bytes += skb->len;
+ bp->dev->stats.tx_packets++;
+ bp->dev->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -778,8 +780,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
- bp->stats.tx_packets++;
- bp->stats.tx_bytes += skb->len;
+ bp->dev->stats.tx_packets++;
+ bp->dev->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
@@ -911,14 +913,14 @@ static int gem_rx(struct macb *bp, int budget)
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
break;
}
skb = bp->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
@@ -938,8 +940,8 @@ static int gem_rx(struct macb *bp, int budget)
GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- bp->stats.rx_packets++;
- bp->stats.rx_bytes += skb->len;
+ bp->dev->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += skb->len;
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
@@ -984,7 +986,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
*/
skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
if (!skb) {
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
@@ -1030,8 +1032,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
__skb_pull(skb, NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, bp->dev);
- bp->stats.rx_packets++;
- bp->stats.rx_bytes += skb->len;
+ bp->dev->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
netif_receive_skb(skb);
@@ -2210,7 +2212,7 @@ static void gem_update_stats(struct macb *bp)
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->stats;
+ struct net_device_stats *nstat = &bp->dev->stats;
gem_update_stats(bp);
@@ -2281,7 +2283,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->stats;
+ struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
if (macb_is_gem(bp))
@@ -2993,15 +2995,15 @@ static void at91ether_rx(struct net_device *dev)
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pktlen;
netif_rx(skb);
} else {
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
- lp->stats.multicast++;
+ dev->stats.multicast++;
/* reset ownership bit */
desc->addr &= ~MACB_BIT(RX_USED);
@@ -3036,15 +3038,15 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & MACB_BIT(TCOMP)) {
/* The TCOM bit is set even if the transmission failed */
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
dma_unmap_single(NULL, lp->skb_physaddr,
lp->skb_length, DMA_TO_DEVICE);
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += lp->skb_length;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += lp->skb_length;
}
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 234a49eaccfd..ec037b0fa2a4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -919,7 +919,6 @@ struct macb {
struct clk *rx_clk;
struct net_device *dev;
struct napi_struct napi;
- struct net_device_stats stats;
union {
struct macb_stats macb;
struct gem_stats gem;
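
The macb conversion above works because struct net_device already embeds a struct net_device_stats, so the driver-private copy removed from struct macb was redundant; every bp->stats.X simply becomes bp->dev->stats.X (or dev->stats.X where a net_device is in scope). A sketch of the resulting shape (struct my_priv and my_count_rx() are illustrative only):

struct my_priv {
	struct net_device *dev;
	/* no private struct net_device_stats needed */
};

static void my_count_rx(struct my_priv *p, unsigned int len)
{
	p->dev->stats.rx_packets++;
	p->dev->stats.rx_bytes += len;
}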
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 2fedd91f3df8..dee604651ba7 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -43,6 +43,8 @@ struct octeon_cn23xx_pf {
struct octeon_config *conf;
};
+#define CN23XX_SLI_DEF_BP 0x40
+
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
int validate_cn23xx_pf_config_info(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index f629c2fe04a4..796c2cbc11f6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -26,6 +26,9 @@
#include "octeon_main.h"
#include "octeon_network.h"
+/* OOM task polling interval */
+#define LIO_OOM_POLL_INTERVAL_MS 250
+
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
struct lio *lio = GET_LIO(netdev);
@@ -124,6 +127,17 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct octeon_device *oct = lio->oct_dev;
u8 *mac;
+ if (nctrl->completion && nctrl->response_code) {
+ /* Signal whoever is interested that the response code from the
+ * firmware has arrived.
+ */
+ WRITE_ONCE(*nctrl->response_code, nctrl->status);
+ complete(nctrl->completion);
+ }
+
+ if (nctrl->status)
+ return;
+
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
case OCTNET_CMD_SET_MULTI_LIST:
@@ -131,11 +145,20 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
case OCTNET_CMD_CHANGE_MACADDR:
mac = ((u8 *)&nctrl->udd[0]) + 2;
- netif_info(lio, probe, lio->netdev,
- "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- mac[0], mac[1],
- mac[2], mac[3],
- mac[4], mac[5]);
+ if (nctrl->ncmd.s.param1) {
+ /* vfidx is 0-based, but vf_num (param1) is 1-based */
+ int vfidx = nctrl->ncmd.s.param1 - 1;
+ bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
+
+ if (mac_is_admin_assigned)
+ netif_info(lio, probe, lio->netdev,
+ "MAC Address %pM is configured for VF %d\n",
+ mac, vfidx);
+ } else {
+ netif_info(lio, probe, lio->netdev,
+ " MACAddr changed to %pM\n",
+ mac);
+ }
break;
case OCTNET_CMD_CHANGE_MTU:
@@ -284,3 +307,56 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
* the PF did that already
*/
}
+
+static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_droq *droq;
+ int q, q_no = 0;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ droq = oct->droq[q_no];
+ if (!droq)
+ continue;
+ octeon_droq_check_oom(droq);
+ }
+ }
+ queue_delayed_work(lio->rxq_status_wq.wq,
+ &lio->rxq_status_wq.wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
+int setup_rx_oom_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->rxq_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
+ octnet_poll_check_rxq_oom_status);
+ lio->rxq_status_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->rxq_status_wq.wq,
+ &lio->rxq_status_wq.wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+ return 0;
+}
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->rxq_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
+ flush_workqueue(lio->rxq_status_wq.wq);
+ destroy_workqueue(lio->rxq_status_wq.wq);
+ }
+}
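
The OOM poller above is a self-rearming delayed work item on a dedicated WQ_MEM_RECLAIM workqueue: each run scans the rx queues and requeues itself 250 ms out, and teardown cancels, flushes, and destroys the queue. The generic shape, with struct my_ctx and my_check() standing in for the lio context and the droq scan:

struct my_ctx {
	struct workqueue_struct *wq;
	struct delayed_work dwork;
};

static void my_poll(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(to_delayed_work(work),
					  struct my_ctx, dwork);

	my_check(ctx);	/* hypothetical scan */
	queue_delayed_work(ctx->wq, &ctx->dwork, msecs_to_jiffies(250));
}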
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 50384cede8be..579dc7336f58 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -33,6 +33,19 @@
static int octnet_get_link_stats(struct net_device *netdev);
+struct oct_intrmod_context {
+ int octeon_id;
+ wait_queue_head_t wc;
+ int cond;
+ int status;
+};
+
+struct oct_intrmod_resp {
+ u64 rh;
+ struct oct_intrmod_cfg intrmod;
+ u64 status;
+};
+
struct oct_mdio_cmd_context {
int octeon_id;
wait_queue_head_t wc;
@@ -213,17 +226,23 @@ static int lio_get_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
- u32 supported, advertising;
+ u32 supported = 0, advertising = 0;
linfo = &lio->linfo;
if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
ecmd->base.port = PORT_FIBRE;
- supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
- SUPPORTED_Pause);
- advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+
+ if (linfo->link.s.speed == SPEED_10000) {
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
+ advertising |= ADVERTISED_Pause;
ethtool_convert_legacy_u32_to_link_mode(
ecmd->link_modes.supported, supported);
ethtool_convert_legacy_u32_to_link_mode(
@@ -1292,95 +1311,101 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int lio_get_intr_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *intr_coal)
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+ u32 status,
+ void *ptr)
{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct octeon_instr_queue *iq;
- struct oct_intrmod_cfg *intrmod_cfg;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_intrmod_context *ctx;
- intrmod_cfg = &oct->intrmod;
+ ctx = (struct oct_intrmod_context *)sc->ctxptr;
- switch (oct->chip_id) {
- case OCTEON_CN23XX_PF_VID:
- case OCTEON_CN23XX_VF_VID:
- if (!intrmod_cfg->rx_enable) {
- intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
- intr_coal->rx_max_coalesced_frames =
- intrmod_cfg->rx_frames;
- }
- if (!intrmod_cfg->tx_enable)
- intr_coal->tx_max_coalesced_frames =
- intrmod_cfg->tx_frames;
- break;
- case OCTEON_CN68XX:
- case OCTEON_CN66XX: {
- struct octeon_cn6xxx *cn6xxx =
- (struct octeon_cn6xxx *)oct->chip;
+ ctx->status = status;
- if (!intrmod_cfg->rx_enable) {
- intr_coal->rx_coalesce_usecs =
- CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
- intr_coal->rx_max_coalesced_frames =
- CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
- }
- iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
- intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
- break;
- }
- default:
- netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ WRITE_ONCE(ctx->cond, 1);
+
+ /* This barrier ensures the response has been fully written
+ * before the handler is woken up
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
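
The callback above follows a classic completion idiom: store the result, publish the condition flag, issue a write barrier, then wake the sleeper, so the waiter can never observe the flag without the response data also being visible. A minimal sketch of the same idiom, with hypothetical demo_* names and generic kernel primitives (not the driver's actual types):

#include <linux/wait.h>

struct demo_ctx {
	wait_queue_head_t wq;
	int cond;
	int result;
};

/* Responder: publish the result, then the flag, then wake the waiter. */
static void demo_complete(struct demo_ctx *ctx, int result)
{
	ctx->result = result;
	WRITE_ONCE(ctx->cond, 1);
	wmb();	/* result must be globally visible before the wakeup */
	wake_up_interruptible(&ctx->wq);
}

/* Waiter: sleep until the flag is observed (or a signal arrives). */
static int demo_wait(struct demo_ctx *ctx)
{
	int ret = wait_event_interruptible(ctx->wq, READ_ONCE(ctx->cond));

	if (ret)
		return ret;	/* -ERESTARTSYS: interrupted by a signal */
	return ctx->result;
}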
+
+/* Get interrupt moderation parameters */
+static int octnet_get_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_context *ctx;
+ struct oct_intrmod_resp *resp;
+ int retval;
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_intrmod_resp),
+ sizeof(struct oct_intrmod_context));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_intrmod_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_intrmod_resp));
+
+ ctx = (struct oct_intrmod_context *)sc->ctxptr;
+ memset(ctx, 0, sizeof(struct oct_intrmod_context));
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct_dev);
+ init_waitqueue_head(&ctx->wc);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
+
+ sc->callback = octnet_intrmod_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
return -EINVAL;
}
- if (intrmod_cfg->rx_enable) {
- intr_coal->use_adaptive_rx_coalesce =
- intrmod_cfg->rx_enable;
- intr_coal->rate_sample_interval =
- intrmod_cfg->check_intrvl;
- intr_coal->pkt_rate_high =
- intrmod_cfg->maxpkt_ratethr;
- intr_coal->pkt_rate_low =
- intrmod_cfg->minpkt_ratethr;
- intr_coal->rx_max_coalesced_frames_high =
- intrmod_cfg->rx_maxcnt_trigger;
- intr_coal->rx_coalesce_usecs_high =
- intrmod_cfg->rx_maxtmr_trigger;
- intr_coal->rx_coalesce_usecs_low =
- intrmod_cfg->rx_mintmr_trigger;
- intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->rx_mincnt_trigger;
+
+ /* Sleep on a wait queue until the cond flag indicates that the
+ * response has arrived or the wait timed out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
+ goto intrmod_info_wait_intr;
}
- if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
- (intrmod_cfg->tx_enable)) {
- intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
- intr_coal->tx_max_coalesced_frames_high =
- intrmod_cfg->tx_maxcnt_trigger;
- intr_coal->tx_max_coalesced_frames_low =
- intrmod_cfg->tx_mincnt_trigger;
+
+ retval = ctx->status || resp->status;
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "Get interrupt moderation parameters failed\n");
+ goto intrmod_info_wait_fail;
}
- return 0;
-}
-/* Callback function for intrmod */
-static void octnet_intrmod_callback(struct octeon_device *oct_dev,
- u32 status,
- void *ptr)
-{
- struct oct_intrmod_cmd *cmd = ptr;
- struct octeon_soft_command *sc = cmd->sc;
+ octeon_swap_8B_data((u64 *)&resp->intrmod,
+ (sizeof(struct oct_intrmod_cfg)) / 8);
+ memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
+ octeon_free_soft_command(oct_dev, sc);
- oct_dev = cmd->oct_dev;
+ return 0;
- if (status)
- dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
- CVM_CAST64(status));
- else
- dev_info(&oct_dev->pci_dev->dev,
- "Rx-Adaptive Interrupt moderation enabled:%llx\n",
- oct_dev->intrmod.rx_enable);
+intrmod_info_wait_fail:
octeon_free_soft_command(oct_dev, sc);
+
+intrmod_info_wait_intr:
+
+ return -ENODEV;
}
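
Note the asymmetric unwind in the two error labels above: a failed or bad-status response frees the soft command, but an interrupted wait does not, presumably because the firmware may still complete the request and the completion callback must not touch freed memory. The lifecycle, summarized (illustrative comment only):

/*
 * Blocking soft-command lifecycle (sketch):
 *
 *   sc = octeon_alloc_soft_command(...)    allocate cmd + resp + ctx
 *   octeon_prepare_soft_command(...)       fill in opcode/params
 *   octeon_send_soft_command(...)          hand off to firmware
 *   sleep_cond(&ctx->wc, &ctx->cond)       wait for the callback
 *       == -EINTR -> return without freeing sc (callback may still run)
 *       ==  0     -> check ctx->status / resp->status, then free sc
 */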
/* Configure interrupt moderation parameters */
@@ -1388,7 +1413,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
- struct oct_intrmod_cmd *cmd;
+ struct oct_intrmod_context *ctx;
struct oct_intrmod_cfg *cfg;
int retval;
struct octeon_device *oct_dev = lio->oct_dev;
@@ -1398,19 +1423,21 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
octeon_alloc_soft_command(oct_dev,
sizeof(struct oct_intrmod_cfg),
0,
- sizeof(struct oct_intrmod_cmd));
+ sizeof(struct oct_intrmod_context));
if (!sc)
return -ENOMEM;
- cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+ ctx = (struct oct_intrmod_context *)sc->ctxptr;
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct_dev);
+ init_waitqueue_head(&ctx->wc);
+
cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
- cmd->sc = sc;
- cmd->cfg = cfg;
- cmd->oct_dev = oct_dev;
sc->iq_no = lio->linfo.txpciq[0].s.q_no;
@@ -1418,7 +1445,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
sc->callback = octnet_intrmod_callback;
- sc->callback_arg = cmd;
+ sc->callback_arg = sc;
sc->wait_time = 1000;
retval = octeon_send_soft_command(oct_dev, sc);
@@ -1427,7 +1454,29 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
return -EINVAL;
}
- return 0;
+ /* Sleep on a wait queue until the cond flag indicates that the
+ * response has arrived or the wait timed out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
+ retval = ctx->status;
+ if (retval)
+ dev_err(&oct_dev->pci_dev->dev,
+ "intrmod config failed. Status: %llx\n",
+ CVM_CAST64(retval));
+ else
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation %s\n",
+ (intr_cfg->rx_enable) ?
+ "enabled" : "disabled");
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return ((retval) ? -ENODEV : 0);
+ }
+
+ dev_err(&oct_dev->pci_dev->dev, "intrmod config wait interrupted\n");
+
+ return -EINTR;
}
static void
@@ -1584,80 +1633,106 @@ static int octnet_get_link_stats(struct net_device *netdev)
return 0;
}
-/* Enable/Disable auto interrupt Moderation */
-static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
- *intr_coal)
+static int lio_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
{
- int ret = 0;
+ struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct oct_intrmod_cfg *intrmod_cfg;
-
- intrmod_cfg = &oct->intrmod;
-
- if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
- if (intr_coal->rate_sample_interval)
- intrmod_cfg->check_intrvl =
- intr_coal->rate_sample_interval;
- else
- intrmod_cfg->check_intrvl =
- LIO_INTRMOD_CHECK_INTERVAL;
+ struct octeon_instr_queue *iq;
+ struct oct_intrmod_cfg intrmod_cfg;
- if (intr_coal->pkt_rate_high)
- intrmod_cfg->maxpkt_ratethr =
- intr_coal->pkt_rate_high;
- else
- intrmod_cfg->maxpkt_ratethr =
- LIO_INTRMOD_MAXPKT_RATETHR;
+ if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
+ return -ENODEV;
- if (intr_coal->pkt_rate_low)
- intrmod_cfg->minpkt_ratethr =
- intr_coal->pkt_rate_low;
- else
- intrmod_cfg->minpkt_ratethr =
- LIO_INTRMOD_MINPKT_RATETHR;
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID: {
+ if (!intrmod_cfg.rx_enable) {
+ intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
+ intr_coal->rx_max_coalesced_frames =
+ oct->rx_max_coalesced_frames;
+ }
+ if (!intrmod_cfg.tx_enable)
+ intr_coal->tx_max_coalesced_frames =
+ oct->tx_max_coalesced_frames;
+ break;
}
- if (oct->intrmod.rx_enable) {
- if (intr_coal->rx_max_coalesced_frames_high)
- intrmod_cfg->rx_maxcnt_trigger =
- intr_coal->rx_max_coalesced_frames_high;
- else
- intrmod_cfg->rx_maxcnt_trigger =
- LIO_INTRMOD_RXMAXCNT_TRIGGER;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
- if (intr_coal->rx_coalesce_usecs_high)
- intrmod_cfg->rx_maxtmr_trigger =
- intr_coal->rx_coalesce_usecs_high;
- else
- intrmod_cfg->rx_maxtmr_trigger =
- LIO_INTRMOD_RXMAXTMR_TRIGGER;
+ if (!intrmod_cfg.rx_enable) {
+ intr_coal->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+ intr_coal->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+ }
+ iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
+ intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+ break;
+ }
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EINVAL;
+ }
+ if (intrmod_cfg.rx_enable) {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg.rx_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg.check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg.maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg.minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg.rx_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg.rx_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg.rx_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg.rx_mincnt_trigger;
+ }
+ if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
+ (intrmod_cfg.tx_enable)) {
+ intr_coal->use_adaptive_tx_coalesce =
+ intrmod_cfg.tx_enable;
+ intr_coal->tx_max_coalesced_frames_high =
+ intrmod_cfg.tx_maxcnt_trigger;
+ intr_coal->tx_max_coalesced_frames_low =
+ intrmod_cfg.tx_mincnt_trigger;
+ }
+ return 0;
+}
- if (intr_coal->rx_coalesce_usecs_low)
- intrmod_cfg->rx_mintmr_trigger =
- intr_coal->rx_coalesce_usecs_low;
- else
- intrmod_cfg->rx_mintmr_trigger =
- LIO_INTRMOD_RXMINTMR_TRIGGER;
+/* Enable/Disable auto interrupt Moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod_cfg,
+ struct ethtool_coalesce *intr_coal)
+{
+ int ret = 0;
- if (intr_coal->rx_max_coalesced_frames_low)
- intrmod_cfg->rx_mincnt_trigger =
- intr_coal->rx_max_coalesced_frames_low;
- else
- intrmod_cfg->rx_mincnt_trigger =
- LIO_INTRMOD_RXMINCNT_TRIGGER;
+ if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
+ intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
+ intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
+ intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
}
- if (oct->intrmod.tx_enable) {
- if (intr_coal->tx_max_coalesced_frames_high)
- intrmod_cfg->tx_maxcnt_trigger =
- intr_coal->tx_max_coalesced_frames_high;
- else
- intrmod_cfg->tx_maxcnt_trigger =
- LIO_INTRMOD_TXMAXCNT_TRIGGER;
- if (intr_coal->tx_max_coalesced_frames_low)
- intrmod_cfg->tx_mincnt_trigger =
- intr_coal->tx_max_coalesced_frames_low;
- else
- intrmod_cfg->tx_mincnt_trigger =
- LIO_INTRMOD_TXMINCNT_TRIGGER;
+ if (intrmod_cfg->rx_enable) {
+ intrmod_cfg->rx_maxcnt_trigger =
+ intr_coal->rx_max_coalesced_frames_high;
+ intrmod_cfg->rx_maxtmr_trigger =
+ intr_coal->rx_coalesce_usecs_high;
+ intrmod_cfg->rx_mintmr_trigger =
+ intr_coal->rx_coalesce_usecs_low;
+ intrmod_cfg->rx_mincnt_trigger =
+ intr_coal->rx_max_coalesced_frames_low;
+ }
+ if (intrmod_cfg->tx_enable) {
+ intrmod_cfg->tx_maxcnt_trigger =
+ intr_coal->tx_max_coalesced_frames_high;
+ intrmod_cfg->tx_mincnt_trigger =
+ intr_coal->tx_max_coalesced_frames_low;
}
ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
@@ -1666,7 +1741,9 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
}
static int
-oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+oct_cfg_rx_intrcnt(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 rx_max_coalesced_frames;
@@ -1692,7 +1769,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
int q_no;
if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ rx_max_coalesced_frames = intrmod->rx_frames;
else
rx_max_coalesced_frames =
intr_coal->rx_max_coalesced_frames;
@@ -1703,17 +1780,18 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
(octeon_read_csr64(
oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
- rx_max_coalesced_frames);
+ (rx_max_coalesced_frames - 1));
/*consider setting resend bit*/
}
- oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ intrmod->rx_frames = rx_max_coalesced_frames;
+ oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
break;
}
case OCTEON_CN23XX_VF_VID: {
int q_no;
if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ rx_max_coalesced_frames = intrmod->rx_frames;
else
rx_max_coalesced_frames =
intr_coal->rx_max_coalesced_frames;
@@ -1724,9 +1802,10 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
rx_max_coalesced_frames);
- /* consider writing to resend bit here */
+ /*consider writing to resend bit here*/
}
- oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ intrmod->rx_frames = rx_max_coalesced_frames;
+ oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
break;
}
default:
@@ -1736,6 +1815,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
}
static int oct_cfg_rx_intrtime(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
@@ -1766,7 +1846,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
int q_no;
if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ rx_coalesce_usecs = intrmod->rx_usecs;
else
rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
time_threshold =
@@ -1775,11 +1855,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
q_no += oct->sriov_info.pf_srn;
octeon_write_csr64(oct,
CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
- (oct->intrmod.rx_frames |
- (time_threshold << 32)));
+ (intrmod->rx_frames |
+ ((u64)time_threshold << 32)));
/*consider writing to resend bit here*/
}
- oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ intrmod->rx_usecs = rx_coalesce_usecs;
+ oct->rx_coalesce_usecs = rx_coalesce_usecs;
break;
}
case OCTEON_CN23XX_VF_VID: {
@@ -1787,7 +1868,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
int q_no;
if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ rx_coalesce_usecs = intrmod->rx_usecs;
else
rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
@@ -1796,11 +1877,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
for (q_no = 0; q_no < oct->num_oqs; q_no++) {
octeon_write_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
- (oct->intrmod.rx_frames |
- (time_threshold << 32)));
- /* consider setting resend bit */
+ (intrmod->rx_frames |
+ ((u64)time_threshold << 32)));
+ /*consider setting resend bit*/
}
- oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ intrmod->rx_usecs = rx_coalesce_usecs;
+ oct->rx_coalesce_usecs = rx_coalesce_usecs;
break;
}
default:
@@ -1811,8 +1893,9 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
}
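
The added (u64) casts above are the substantive fix in this hunk: assuming time_threshold is a 32-bit variable (which the added cast strongly suggests), shifting it left by 32 without widening is undefined behavior in C and in practice discards the value instead of placing it in bits 63:32 of the register. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t time_threshold = 0x12345678;
	uint32_t rx_frames = 64;

	/* Wrong: a 32-bit shift by 32 is undefined behavior.
	 *   uint64_t bad = rx_frames | (time_threshold << 32);
	 */

	/* Right: widen first, then shift into the upper half. */
	uint64_t val = rx_frames | ((uint64_t)time_threshold << 32);

	printf("packed register value: 0x%016llx\n",
	       (unsigned long long)val);
	return 0;
}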
static int
-oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
- __attribute__((unused)))
+oct_cfg_tx_intrcnt(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 iq_intr_pkt;
@@ -1839,12 +1922,13 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
val = readq(inst_cnt_reg);
/*clear wmark and count.dont want to write count back*/
val = (val & 0xFFFF000000000000ULL) |
- ((u64)iq_intr_pkt
+ ((u64)(iq_intr_pkt - 1)
<< CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
writeq(val, inst_cnt_reg);
/*consider setting resend bit*/
}
- oct->intrmod.tx_frames = iq_intr_pkt;
+ intrmod->tx_frames = iq_intr_pkt;
+ oct->tx_max_coalesced_frames = iq_intr_pkt;
break;
}
default:
@@ -1859,6 +1943,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
int ret;
struct octeon_device *oct = lio->oct_dev;
+ struct oct_intrmod_cfg intrmod = {0};
u32 j, q_no;
int db_max, db_min;
@@ -1877,8 +1962,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
} else {
dev_err(&oct->pci_dev->dev,
"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
- intr_coal->tx_max_coalesced_frames, db_min,
- db_max);
+ intr_coal->tx_max_coalesced_frames,
+ db_min, db_max);
return -EINVAL;
}
break;
@@ -1889,24 +1974,36 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
return -EINVAL;
}
- oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
- oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+ intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+ intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+ intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
- ret = oct_cfg_adaptive_intr(lio, intr_coal);
+ ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
if (!intr_coal->use_adaptive_rx_coalesce) {
- ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
if (ret)
goto ret_intrmod;
- ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
if (ret)
goto ret_intrmod;
+ } else {
+ oct->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ oct->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
}
+
if (!intr_coal->use_adaptive_tx_coalesce) {
- ret = oct_cfg_tx_intrcnt(lio, intr_coal);
+ ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
if (ret)
goto ret_intrmod;
+ } else {
+ oct->tx_max_coalesced_frames =
+ CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
}
return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 92f46b1375c3..927617cbf6a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -16,6 +16,7 @@
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
@@ -60,12 +61,6 @@ MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
static int ptp_enable = 1;
-/* Bit mask values for lio->ifstate */
-#define LIO_IFSTATE_DROQ_OPS 0x01
-#define LIO_IFSTATE_REGISTERED 0x02
-#define LIO_IFSTATE_RUNNING 0x04
-#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
-
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
@@ -178,6 +173,8 @@ static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
+static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
+ int linkstate);
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
@@ -531,36 +528,6 @@ static void liquidio_deinit_pci(void)
}
/**
- * \brief check interface state
- * @param lio per-network private data
- * @param state_flag flag state to check
- */
-static inline int ifstate_check(struct lio *lio, int state_flag)
-{
- return atomic_read(&lio->ifstate) & state_flag;
-}
-
-/**
- * \brief set interface state
- * @param lio per-network private data
- * @param state_flag flag state to set
- */
-static inline void ifstate_set(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
-}
-
-/**
- * \brief clear interface state
- * @param lio per-network private data
- * @param state_flag flag state to clear
- */
-static inline void ifstate_reset(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
-}
-
-/**
* \brief Stop Tx queues
* @param netdev network device
*/
@@ -748,7 +715,8 @@ static void delete_glists(struct lio *lio)
kfree(g);
} while (g);
- if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
lio_dma_free(lio->oct_dev,
lio->glist_entry_size * lio->tx_qsize,
lio->glists_virt_base[i],
@@ -805,7 +773,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
}
for (i = 0; i < num_iqs; i++) {
- int numa_node = cpu_to_node(i % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
spin_lock_init(&lio->glist_lock[i]);
@@ -967,14 +935,13 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
tx_restart, 1);
netif_wake_subqueue(netdev, iq->q_index);
- } else {
- if (!octnet_iq_is_full(oct, lio->txq)) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
- lio->txq,
- tx_restart, 1);
- wake_q(netdev, lio->txq);
- }
}
+ } else if (netif_queue_stopped(netdev) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, lio->txq))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq, tx_restart, 1);
+ netif_wake_queue(netdev);
}
}
@@ -1084,16 +1051,35 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
int i;
int num_ioq_vectors;
int num_alloc_ioq_vectors;
+ char *queue_irq_names = NULL;
+ char *aux_irq_name = NULL;
if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
/* one non ioq interrupt for handling sli_mac_pf_int_sum */
oct->num_msix_irqs += 1;
+ /* allocate storage for the names assigned to each irq */
+ oct->irq_name_storage =
+ kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ,
+ GFP_KERNEL);
+ if (!oct->irq_name_storage) {
+ dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+ return -ENOMEM;
+ }
+
+ queue_irq_names = oct->irq_name_storage;
+ aux_irq_name = &queue_irq_names
+ [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+
oct->msix_entries = kcalloc(
oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!oct->msix_entries)
- return 1;
+ if (!oct->msix_entries) {
+ dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return -ENOMEM;
+ }
msix_entries = (struct msix_entry *)oct->msix_entries;
/*Assumption is that pf msix vectors start from pf srn to pf to
@@ -1111,7 +1097,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return num_alloc_ioq_vectors;
}
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
@@ -1119,9 +1107,12 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
/** For PF, there is one non-ioq interrupt handler */
num_ioq_vectors -= 1;
+
+ snprintf(aux_irq_name, INTRNAMSIZ,
+ "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num);
irqret = request_irq(msix_entries[num_ioq_vectors].vector,
- liquidio_legacy_intr_handler, 0, "octeon",
- oct);
+ liquidio_legacy_intr_handler, 0,
+ aux_irq_name, oct);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1129,13 +1120,20 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
for (i = 0; i < num_ioq_vectors; i++) {
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
+ "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, i);
+
irqret = request_irq(msix_entries[i].vector,
liquidio_msix_intr_handler, 0,
- "octeon", &oct->ioq_vector[i]);
+ &queue_irq_names[IRQ_NAME_OFF(i)],
+ &oct->ioq_vector[i]);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1155,7 +1153,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
oct->ioq_vector[i].vector = msix_entries[i].vector;
/* assign the cpu mask for this msix interrupt vector */
@@ -1173,111 +1173,150 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
else
oct->flags |= LIO_FLAG_MSI_ENABLED;
+ /* allocate storage for the names assigned to the irq */
+ oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ if (!oct->irq_name_storage)
+ return -ENOMEM;
+
+ queue_irq_names = oct->irq_name_storage;
+
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+ "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, 0);
+
irqret = request_irq(oct->pci_dev->irq,
- liquidio_legacy_intr_handler, IRQF_SHARED,
- "octeon", oct);
+ liquidio_legacy_intr_handler,
+ IRQF_SHARED,
+ &queue_irq_names[IRQ_NAME_OFF(0)], oct);
if (irqret) {
if (oct->flags & LIO_FLAG_MSI_ENABLED)
pci_disable_msi(oct->pci_dev);
dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
irqret);
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
}
return 0;
}
+static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_device *other_oct;
+
+ other_oct = lio_get_device(oct->octeon_id + 1);
+
+ if (other_oct && other_oct->pci_dev) {
+ int oct_busnum, other_oct_busnum;
+
+ oct_busnum = oct->pci_dev->bus->number;
+ other_oct_busnum = other_oct->pci_dev->bus->number;
+
+ if (oct_busnum == other_oct_busnum) {
+ int oct_slot, other_oct_slot;
+
+ oct_slot = PCI_SLOT(oct->pci_dev->devfn);
+ other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
+
+ if (oct_slot == other_oct_slot)
+ return other_oct;
+ }
+ }
+
+ return NULL;
+}
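
get_other_octeon_device() assumes that the companion Octeon of a dual-chip adapter shows up as another PCI function in the same bus/slot, one device id further along the driver's device list. PCI_SLOT() extracts the slot bits from the packed devfn; a condensed sketch of the matching predicate:

#include <linux/pci.h>

/* devfn packs slot (bits 7:3) and function (bits 2:0); two chips on
 * one adapter share bus and slot and differ only in the function, so
 * bus number plus PCI_SLOT() is enough to identify the companion.
 */
static bool demo_same_adapter(struct pci_dev *a, struct pci_dev *b)
{
	return a->bus->number == b->bus->number &&
	       PCI_SLOT(a->devfn) == PCI_SLOT(b->devfn);
}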
+
+static void disable_all_vf_links(struct octeon_device *oct)
+{
+ struct net_device *netdev;
+ int max_vfs, vf, i;
+
+ if (!oct)
+ return;
+
+ max_vfs = oct->sriov_info.max_vfs;
+
+ for (i = 0; i < oct->ifcount; i++) {
+ netdev = oct->props[i].netdev;
+ if (!netdev)
+ continue;
+
+ for (vf = 0; vf < max_vfs; vf++)
+ liquidio_set_vf_link_state(netdev, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ }
+}
+
static int liquidio_watchdog(void *param)
{
- u64 wdog;
- u16 mask_of_stuck_cores = 0;
- u16 mask_of_crashed_cores = 0;
- int core_num;
- u8 core_is_stuck[LIO_MAX_CORES];
- u8 core_crashed[LIO_MAX_CORES];
+ bool err_msg_was_printed[LIO_MAX_CORES];
+ u16 mask_of_crashed_or_stuck_cores = 0;
+ bool all_vf_links_are_disabled = false;
struct octeon_device *oct = param;
+ struct octeon_device *other_oct;
+#ifdef CONFIG_MODULE_UNLOAD
+ long refcount, vfs_referencing_pf;
+ u64 vfs_mask1, vfs_mask2;
+#endif
+ int core;
- memset(core_is_stuck, 0, sizeof(core_is_stuck));
- memset(core_crashed, 0, sizeof(core_crashed));
+ memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
while (!kthread_should_stop()) {
- mask_of_crashed_cores =
+ /* sleep for a couple of seconds so that we don't hog the CPU */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(2000));
+
+ mask_of_crashed_or_stuck_cores =
(u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
- for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
- if (!core_is_stuck[core_num]) {
- wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
-
- /* look at watchdog state field */
- wdog &= CIU3_WDOG_MASK;
- if (wdog) {
- /* this watchdog timer has expired */
- core_is_stuck[core_num] =
- LIO_MONITOR_WDOG_EXPIRE;
- mask_of_stuck_cores |= (1 << core_num);
- }
- }
+ if (!mask_of_crashed_or_stuck_cores)
+ continue;
- if (!core_crashed[core_num])
- core_crashed[core_num] =
- (mask_of_crashed_cores >> core_num) & 1;
- }
+ WRITE_ONCE(oct->cores_crashed, true);
+ other_oct = get_other_octeon_device(oct);
+ if (other_oct)
+ WRITE_ONCE(other_oct->cores_crashed, true);
- if (mask_of_stuck_cores) {
- for (core_num = 0; core_num < LIO_MAX_CORES;
- core_num++) {
- if (core_is_stuck[core_num] == 1) {
- dev_err(&oct->pci_dev->dev,
- "ERROR: Octeon core %d is stuck!\n",
- core_num);
- /* 2 means we have printk'd an error
- * so no need to repeat the same printk
- */
- core_is_stuck[core_num] =
- LIO_MONITOR_CORE_STUCK_MSGD;
- }
- }
- }
+ for (core = 0; core < LIO_MAX_CORES; core++) {
+ bool core_crashed_or_got_stuck;
- if (mask_of_crashed_cores) {
- for (core_num = 0; core_num < LIO_MAX_CORES;
- core_num++) {
- if (core_crashed[core_num] == 1) {
- dev_err(&oct->pci_dev->dev,
- "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
- core_num);
- /* 2 means we have printk'd an error
- * so no need to repeat the same printk
- */
- core_crashed[core_num] =
- LIO_MONITOR_CORE_STUCK_MSGD;
- }
+ core_crashed_or_got_stuck =
+ (mask_of_crashed_or_stuck_cores
+ >> core) & 1;
+
+ if (core_crashed_or_got_stuck &&
+ !err_msg_was_printed[core]) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
+ core);
+ err_msg_was_printed[core] = true;
}
}
+
+ if (all_vf_links_are_disabled)
+ continue;
+
+ disable_all_vf_links(oct);
+ disable_all_vf_links(other_oct);
+ all_vf_links_are_disabled = true;
+
#ifdef CONFIG_MODULE_UNLOAD
- if (mask_of_stuck_cores || mask_of_crashed_cores) {
- /* make module refcount=0 so that rmmod will work */
- long refcount;
+ vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
+ vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
- refcount = module_refcount(THIS_MODULE);
+ vfs_referencing_pf = hweight64(vfs_mask1);
+ vfs_referencing_pf += hweight64(vfs_mask2);
- while (refcount > 0) {
+ refcount = module_refcount(THIS_MODULE);
+ if (refcount >= vfs_referencing_pf) {
+ while (vfs_referencing_pf) {
module_put(THIS_MODULE);
- refcount = module_refcount(THIS_MODULE);
- }
-
- /* compensate for and withstand an unlikely (but still
- * possible) race condition
- */
- while (refcount < 0) {
- try_module_get(THIS_MODULE);
- refcount = module_refcount(THIS_MODULE);
+ vfs_referencing_pf--;
}
}
#endif
- /* sleep for two seconds */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(2 * HZ);
}
return 0;
@@ -1369,6 +1408,12 @@ liquidio_probe(struct pci_dev *pdev,
return 0;
}
+static bool fw_type_is_none(void)
+{
+ return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
+ sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
+}
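
fw_type_is_none() centralizes a comparison that load_firmware() used to open-code (see the hunk further below). Using sizeof(LIO_FW_NAME_TYPE_NONE) as the length includes the terminating NUL, so only the exact string matches, never a longer one with the same prefix. A quick userspace demonstration, assuming LIO_FW_NAME_TYPE_NONE is the string "none":

#include <stdio.h>
#include <string.h>

#define LIO_FW_NAME_TYPE_NONE "none"	/* assumed value */

int main(void)
{
	/* Comparing through the NUL rejects longer strings too. */
	printf("%d\n", strncmp("none", LIO_FW_NAME_TYPE_NONE,
			       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0); /* 1 */
	printf("%d\n", strncmp("nonex", LIO_FW_NAME_TYPE_NONE,
			       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0); /* 0 */
	return 0;
}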
+
/**
* \brief Destroy resources associated with octeon device
* @param pdev PCI device structure
@@ -1449,6 +1494,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
pci_disable_msi(oct->pci_dev);
}
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+
/* fallthrough */
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
if (OCTEON_CN23XX_PF(oct))
@@ -1508,9 +1556,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
- /* Soft reset the octeon device before exiting */
- if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
- oct->fn_list.soft_reset(oct);
+ if (!fw_type_is_none()) {
+ /* Soft reset the octeon device before exiting */
+ if (!OCTEON_CN23XX_PF(oct) ||
+ (OCTEON_CN23XX_PF(oct) && !oct->octeon_id))
+ oct->fn_list.soft_reset(oct);
+ }
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
@@ -1643,6 +1694,15 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
liquidio_stop(netdev);
+ if (fw_type_is_none()) {
+ struct octnic_ctrl_pkt nctrl;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ }
+
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
@@ -1658,6 +1718,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
cleanup_link_status_change_wq(netdev);
+ cleanup_rx_oom_poll_fn(netdev);
+
delete_glists(lio);
free_netdev(netdev);
@@ -2126,8 +2188,7 @@ static int load_firmware(struct octeon_device *oct)
char fw_name[LIO_MAX_FW_FILENAME_LEN];
char *tmp_fw_type;
- if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
- sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+ if (fw_type_is_none()) {
dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
return ret;
}
@@ -2211,8 +2272,8 @@ static void if_cfg_callback(struct octeon_device *oct,
oct = lio_get_device(ctx->octeon_id);
if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
- CVM_CAST64(resp->status));
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
+ CVM_CAST64(resp->status), status);
WRITE_ONCE(ctx->cond, 1);
snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
@@ -2437,8 +2498,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/* Flush the instruction queue */
iq = oct->instr_queue[iq_no];
if (iq) {
- /* Process iq buffers with in the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
+ if (atomic_read(&iq->instr_pending))
+ /* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, budget);
+ else
+ tx_done = 1;
/* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false.
*/
@@ -2555,6 +2619,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
__func__);
return 1;
}
+
+ if (octeon_dev->ioq_vector) {
+ struct octeon_ioq_vector *ioq_vector;
+
+ ioq_vector = &octeon_dev->ioq_vector[q];
+ netif_set_xps_queue(netdev,
+ &ioq_vector->affinity_mask,
+ ioq_vector->iq_index);
+ }
}
return 0;
@@ -3426,6 +3499,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
@@ -3459,6 +3534,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.more = vxlan_cmd_bit;
@@ -3596,7 +3673,8 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
nctrl.ncmd.s.more = 1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
nctrl.wait_time = LIO_CMD_WAIT_TM;
nctrl.udd[0] = 0;
@@ -4122,6 +4200,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
+ if (setup_rx_oom_poll_fn(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -4271,7 +4352,6 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
*/
static int liquidio_init_nic_module(struct octeon_device *oct)
{
- struct oct_intrmod_cfg *intrmod_cfg;
int i, retval = 0;
int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
@@ -4296,22 +4376,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
liquidio_ptp_init(oct);
- /* Initialize interrupt moderation params */
- intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
- intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
- intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
- intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
- intrmod_cfg->tx_enable = 1;
- intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
- intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
- intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
- intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
- intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -4373,6 +4437,7 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
struct octeon_device *oct = (struct octeon_device *)buf;
struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
int i, notice, vf_idx;
+ bool cores_crashed;
u64 *data, vf_num;
notice = recv_pkt->rh.r.ossp;
@@ -4383,19 +4448,23 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
octeon_swap_8B_data(&vf_num, 1);
vf_idx = (int)vf_num - 1;
+ cores_crashed = READ_ONCE(oct->cores_crashed);
+
if (notice == VF_DRV_LOADED) {
if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
dev_info(&oct->pci_dev->dev,
"driver for VF%d was loaded\n", vf_idx);
- try_module_get(THIS_MODULE);
+ if (!cores_crashed)
+ try_module_get(THIS_MODULE);
}
} else if (notice == VF_DRV_REMOVED) {
if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
dev_info(&oct->pci_dev->dev,
"driver for VF%d was removed\n", vf_idx);
- module_put(THIS_MODULE);
+ if (!cores_crashed)
+ module_put(THIS_MODULE);
}
} else if (notice == VF_DRV_MACADDR_CHANGED) {
u8 *b = (u8 *)&data[1];
@@ -4447,14 +4516,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
if (OCTEON_CN23XX_PF(octeon_dev)) {
if (!cn23xx_fw_loaded(octeon_dev)) {
fw_loaded = 0;
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
- return 1;
- /* things might have changed */
- if (!cn23xx_fw_loaded(octeon_dev))
- fw_loaded = 0;
- else
- fw_loaded = 1;
+ if (!fw_type_is_none()) {
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+ /* things might have changed */
+ if (!cn23xx_fw_loaded(octeon_dev))
+ fw_loaded = 0;
+ else
+ fw_loaded = 1;
+ }
} else {
fw_loaded = 1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7b83be4ce1fe..34c77821fad9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -16,6 +16,7 @@
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
@@ -39,12 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-/* Bit mask values for lio->ifstate */
-#define LIO_IFSTATE_DROQ_OPS 0x01
-#define LIO_IFSTATE_REGISTERED 0x02
-#define LIO_IFSTATE_RUNNING 0x04
-#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
-
struct liquidio_if_cfg_context {
int octeon_id;
@@ -336,36 +331,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
};
/**
- * \brief check interface state
- * @param lio per-network private data
- * @param state_flag flag state to check
- */
-static int ifstate_check(struct lio *lio, int state_flag)
-{
- return atomic_read(&lio->ifstate) & state_flag;
-}
-
-/**
- * \brief set interface state
- * @param lio per-network private data
- * @param state_flag flag state to set
- */
-static void ifstate_set(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
-}
-
-/**
- * \brief clear interface state
- * @param lio per-network private data
- * @param state_flag flag state to clear
- */
-static void ifstate_reset(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
-}
-
-/**
* \brief Stop Tx queues
* @param netdev network device
*/
@@ -506,7 +471,8 @@ static void delete_glists(struct lio *lio)
kfree(g);
} while (g);
- if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
lio_dma_free(lio->oct_dev,
lio->glist_entry_size * lio->tx_qsize,
lio->glists_virt_base[i],
@@ -722,13 +688,12 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
netif_wake_subqueue(netdev, iq->q_index);
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
tx_restart, 1);
- } else {
- if (!octnet_iq_is_full(oct, lio->txq)) {
- INCR_INSTRQUEUE_PKT_COUNT(
- lio->oct_dev, lio->txq, tx_restart, 1);
- wake_q(netdev, lio->txq);
- }
}
+ } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, lio->txq))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq, tx_restart, 1);
+ netif_wake_queue(netdev);
}
}
@@ -780,6 +745,7 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
static int octeon_setup_interrupt(struct octeon_device *oct)
{
struct msix_entry *msix_entries;
+ char *queue_irq_names = NULL;
int num_alloc_ioq_vectors;
int num_ioq_vectors;
int irqret;
@@ -788,10 +754,25 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
if (oct->msix_on) {
oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
+ /* allocate storage for the names assigned to each irq */
+ oct->irq_name_storage =
+ kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ,
+ GFP_KERNEL);
+ if (!oct->irq_name_storage) {
+ dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+ return -ENOMEM;
+ }
+
+ queue_irq_names = oct->irq_name_storage;
+
oct->msix_entries = kcalloc(
oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!oct->msix_entries)
- return 1;
+ if (!oct->msix_entries) {
+ dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return -ENOMEM;
+ }
msix_entries = (struct msix_entry *)oct->msix_entries;
@@ -805,16 +786,23 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return num_alloc_ioq_vectors;
}
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
num_ioq_vectors = oct->num_msix_irqs;
for (i = 0; i < num_ioq_vectors; i++) {
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
+ "LiquidIO%u-vf%u-rxtx-%u",
+ oct->octeon_id, oct->vf_num, i);
+
irqret = request_irq(msix_entries[i].vector,
liquidio_msix_intr_handler, 0,
- "octeon", &oct->ioq_vector[i]);
+ &queue_irq_names[IRQ_NAME_OFF(i)],
+ &oct->ioq_vector[i]);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -830,7 +818,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
oct->ioq_vector[i].vector = msix_entries[i].vector;
/* assign the cpu mask for this msix interrupt vector */
@@ -975,6 +965,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
}
/* Soft reset the octeon device before exiting */
if (oct->pci_dev->reset_fn)
@@ -1163,6 +1155,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
+ cleanup_rx_oom_poll_fn(netdev);
+
cleanup_link_status_change_wq(netdev);
delete_glists(lio);
@@ -1642,8 +1636,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/* Flush the instruction queue */
iq = oct->instr_queue[iq_no];
if (iq) {
- /* Process iq buffers with in the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
+ if (atomic_read(&iq->instr_pending))
+ /* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, budget);
+ else
+ tx_done = 1;
+
/* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false.
*/
@@ -2486,6 +2484,8 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
+ struct completion compl;
+ u16 response_code;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2497,14 +2497,25 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ init_completion(&compl);
+ nctrl.completion = &compl;
+ nctrl.response_code = &response_code;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
+ return -EIO;
}
- return ret;
+ if (!wait_for_completion_timeout(&compl,
+ msecs_to_jiffies(nctrl.wait_time)))
+ return -EPERM;
+
+ if (READ_ONCE(response_code))
+ return -EPERM;
+
+ return 0;
}
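
The VLAN-add path above turns an asynchronous control packet into a synchronous call: the sender embeds a struct completion and a response-code pointer in the request, and the completion callback (liquidio_link_ctrl_cmd_completion, not shown in this hunk) presumably records the firmware's verdict and calls complete(). The shape of the pattern, with hypothetical demo_* names and error-code choices:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_cmd {
	struct completion *done;
	u16 *response_code;
};

/* Callback side: record the firmware's verdict, then signal. */
static void demo_cmd_done(struct demo_cmd *cmd, u16 code)
{
	if (cmd->response_code)
		WRITE_ONCE(*cmd->response_code, code);
	if (cmd->done)
		complete(cmd->done);
}

/* Sender side: wait with a bounded timeout after issuing the command. */
static int demo_cmd_wait(struct demo_cmd *cmd, unsigned long timeout_ms)
{
	if (!wait_for_completion_timeout(cmd->done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;	/* firmware never answered */
	return READ_ONCE(*cmd->response_code) ? -EIO : 0;
}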
static int
@@ -2549,6 +2560,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
@@ -2581,6 +2594,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.more = vxlan_cmd_bit;
@@ -3003,6 +3018,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
+ if (setup_rx_oom_poll_fn(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -3057,7 +3075,6 @@ setup_nic_wait_intr:
*/
static int liquidio_init_nic_module(struct octeon_device *oct)
{
- struct oct_intrmod_cfg *intrmod_cfg;
int num_nic_ports = 1;
int i, retval = 0;
@@ -3079,22 +3096,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
goto octnet_init_failure;
}
- /* Initialize interrupt moderation params */
- intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
- intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
- intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
- intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
- intrmod_cfg->tx_enable = 1;
- intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
- intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
- intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
- intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
- intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 294c6f3c6b48..8ea2323d8d67 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -27,7 +27,7 @@
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
-#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MINOR_VERSION 5
#define LIQUIDIO_BASE_MICRO_VERSION 1
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
@@ -83,6 +83,7 @@ enum octeon_tag_type {
#define OPCODE_NIC_INTRMOD_CFG 0x08
#define OPCODE_NIC_IF_CFG 0x09
#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
+#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
#define VF_DRV_LOADED 1
#define VF_DRV_REMOVED -1
#define VF_DRV_MACADDR_CHANGED 2
@@ -100,6 +101,11 @@ enum octeon_tag_type {
#define BYTES_PER_DHLEN_UNIT 8
#define MAX_REG_CNT 2000000U
+#define INTRNAMSIZ 32
+#define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ)
+#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2)
+#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2)
+
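These macros describe one flat buffer that holds all IRQ names back to back, INTRNAMSIZ bytes per slot. request_irq() stores only the name pointer rather than copying the string, so the storage must stay alive for the lifetime of the IRQ; that is why it hangs off struct octeon_device as irq_name_storage and is freed together with the MSI-X entries. A condensed sketch of the VF-side setup added by this patch:

/* One kcalloc'd buffer, sliced into fixed INTRNAMSIZ-byte slots; the
 * name for queue i lives at &names[IRQ_NAME_OFF(i)].
 */
static int demo_alloc_irq_names(struct octeon_device *oct)
{
	char *names;

	names = kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, GFP_KERNEL);
	if (!names)
		return -ENOMEM;

	snprintf(&names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
		 "LiquidIO%u-vf%u-rxtx-%u", oct->octeon_id, oct->vf_num, 0);

	oct->irq_name_storage = names;	/* freed with the MSI-X entries */
	return 0;
}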
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
@@ -181,6 +187,7 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_Q 0
/* NIC Command types */
+#define OCTNET_CMD_RESET_PF 0x0
#define OCTNET_CMD_CHANGE_MTU 0x1
#define OCTNET_CMD_CHANGE_MACADDR 0x2
#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
@@ -845,29 +852,6 @@ struct oct_mdio_cmd {
#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
-/* intrmod: max. packet rate threshold */
-#define LIO_INTRMOD_MAXPKT_RATETHR 196608
-/* intrmod: min. packet rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR 9216
-/* intrmod: max. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
-/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER 0
-/* intrmod: max. time to trigger interrupt */
-#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
-/* 66xx:intrmod: min. time to trigger interrupt
- * (value of 1 is optimum for TCP_RR)
- */
-#define LIO_INTRMOD_RXMINTMR_TRIGGER 1
-
-/* intrmod: max. packets to trigger interrupt */
-#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64
-/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_TXMINCNT_TRIGGER 0
-
-/* intrmod: poll interval in seconds */
-#define LIO_INTRMOD_CHECK_INTERVAL 1
-
struct oct_intrmod_cfg {
u64 rx_enable;
u64 tx_enable;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 9675ffbf25e6..e21b477d0159 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -793,7 +793,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
u32 num_descs = 0;
u32 iq_no = 0;
union oct_txpciq txpciq;
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct))
num_descs =
@@ -837,7 +837,7 @@ int octeon_setup_output_queues(struct octeon_device *oct)
u32 num_descs = 0;
u32 desc_size = 0;
u32 oq_no = 0;
- int numa_node = cpu_to_node(oq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct)) {
num_descs =
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index c301a3852482..92f67de111aa 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -453,9 +453,6 @@ struct octeon_device {
/** List of dispatch functions */
struct octeon_dispatch_list dispatch;
- /* Interrupt Moderation */
- struct oct_intrmod_cfg intrmod;
-
u32 int_status;
u64 droq_intr;
@@ -517,6 +514,9 @@ struct octeon_device {
void *msix_entries;
+ /* when requesting IRQs, the names are stored here */
+ void *irq_name_storage;
+
struct octeon_sriov_info sriov_info;
struct octeon_pf_vf_hs_word pfvf_hsword;
@@ -538,6 +538,12 @@ struct octeon_device {
u32 priv_flags;
void *watchdog_task;
+
+ u32 rx_coalesce_usecs;
+ u32 rx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames;
+
+ bool cores_crashed;
};
#define OCT_DRV_ONLINE 1
@@ -551,12 +557,6 @@ struct octeon_device {
#define CHIP_CONF(oct, TYPE) \
(((struct octeon_ ## TYPE *)((oct)->chip))->conf)
-struct oct_intrmod_cmd {
- struct octeon_device *oct_dev;
- struct octeon_soft_command *sc;
- struct oct_intrmod_cfg *cfg;
-};
-
/*------------------ Function Prototypes ----------------------*/
/** Initialize device list memory */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 79f809479af6..286be5539cef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -226,8 +226,7 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_droq *droq;
u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
- int orig_node = dev_to_node(&oct->pci_dev->dev);
- int numa_node = cpu_to_node(q_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -267,13 +266,8 @@ int octeon_init_droq(struct octeon_device *oct,
droq->buffer_size = c_buf_size;
desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
- set_dev_node(&oct->pci_dev->dev, numa_node);
droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
(dma_addr_t *)&droq->desc_ring_dma);
- set_dev_node(&oct->pci_dev->dev, orig_node);
- if (!droq->desc_ring)
- droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
- (dma_addr_t *)&droq->desc_ring_dma);
if (!droq->desc_ring) {
dev_err(&oct->pci_dev->dev,
@@ -519,6 +513,32 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
return desc_refilled;
}
+/** Check whether we can refill the receive buffers to recover from an
+ * out-of-memory condition.
+ * @param droq - droq being checked.
+ */
+void octeon_droq_check_oom(struct octeon_droq *droq)
+{
+ int desc_refilled;
+ struct octeon_device *oct = droq->oct_dev;
+
+ if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
+ spin_lock_bh(&droq->lock);
+ desc_refilled = octeon_droq_refill(oct, droq);
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to ensure that
+ * the data in memory is accurate when we update the
+ * credits.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
+ spin_unlock_bh(&droq->lock);
+ }
+}
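
octeon_droq_check_oom() is the body of the new rx OOM poller: a delayed work item, armed by setup_rx_oom_poll_fn() and re-armed every LIO_OOM_POLL_INTERVAL_MS (visible at the very top of this patch), sweeps every output queue and tries to refill any that ran dry. A sketch of the worker, assuming (as elsewhere in this driver) that the work_struct is the first member of struct cavium_wk:

static void demo_rx_oom_poll(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;	/* first member */
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	int q_no;

	/* Try to refill any output queue that ran out of buffers. */
	for (q_no = 0; q_no < oct->num_oqs; q_no++)
		octeon_droq_check_oom(oct->droq[q_no]);

	/* Re-arm: poll again after the configured interval. */
	queue_delayed_work(lio->rxq_status_wq.wq,
			   &lio->rxq_status_wq.wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}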
+
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
@@ -970,7 +990,7 @@ int octeon_create_droq(struct octeon_device *oct,
u32 desc_size, void *app_ctx)
{
struct octeon_droq *droq;
- int numa_node = cpu_to_node(q_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (oct->droq[q_no]) {
dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 6982c0af5ecc..9781577115e7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -426,4 +426,6 @@ int octeon_droq_process_packets(struct octeon_device *oct,
int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
int cmd, u32 arg);
+void octeon_droq_check_oom(struct octeon_droq *droq);
+
#endif /*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 4608a5af35a3..5063a12613e5 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -152,7 +152,7 @@ struct octeon_instr_queue {
struct oct_iq_stats stats;
/** DMA mapped base address of the input descriptor ring. */
- u64 base_addr_dma;
+ dma_addr_t base_addr_dma;
/** Application context */
void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 201b9875f9bb..5cca73b8880b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -313,6 +313,7 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox)
return 0;
}
+ spin_unlock_irqrestore(&mbox->lock, flags);
WARN_ON(1);
return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index eef2a1e8a7e3..bf483932ff25 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -28,6 +28,12 @@
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
struct oct_nic_stats_resp {
u64 rh;
struct oct_link_stats stats;
@@ -123,6 +129,9 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
+ /* work queue for rxq oom status */
+ struct cavium_wq rxq_status_wq;
+
/* work queue for link status */
struct cavium_wq link_status_wq;
@@ -132,10 +141,6 @@ struct lio {
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
-#define CIU3_WDOG(c) (0x1010000020000ULL + ((c) << 3))
-#define CIU3_WDOG_MASK 12ULL
-#define LIO_MONITOR_WDOG_EXPIRE 1
-#define LIO_MONITOR_CORE_STUCK_MSGD 2
#define LIO_MAX_CORES 12
/**
@@ -146,6 +151,10 @@ struct lio {
*/
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
+int setup_rx_oom_poll_fn(struct net_device *netdev);
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev);
+
/**
* \brief Link control command completion callback
* @param nctrl_ptr pointer to control packet structure
@@ -438,4 +447,34 @@ static inline void octeon_fast_packet_next(struct octeon_droq *droq,
get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
}
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
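These helpers treat an atomic_t as a plain flag word; note that ifstate_set()/ifstate_reset() are read-modify-write sequences built from atomic_read()/atomic_set(), so concurrent state changes are assumed to be serialized by the caller. A usage sketch, assuming a valid 'struct lio *lio':

ifstate_set(lio, LIO_IFSTATE_RUNNING);
if (ifstate_check(lio, LIO_IFSTATE_RUNNING | LIO_IFSTATE_REGISTERED))
	ifstate_reset(lio, LIO_IFSTATE_DROQ_OPS);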
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 0243be8dd56f..b457cf23fce6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -100,14 +100,16 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
- /* Call the callback function if status is OK.
- * Status is OK only if a response was expected and core returned
- * success.
+ /* Call the callback function if status is zero (meaning OK) or status
+ * contains a firmware status code greater than zero (meaning the
+ * firmware is reporting an error).
* If no response was expected, status is OK if the command was posted
* successfully.
*/
- if (!status && nctrl->cb_fn)
+ if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) {
+ nctrl->status = status;
nctrl->cb_fn(nctrl);
+ }
octeon_free_soft_command(oct, sc);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0c7a5c9b2932..6480ef863441 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -62,6 +62,10 @@ struct octnic_ctrl_pkt {
/** Callback function called when the command has been fetched */
octnic_ctrl_pkt_cb_fn_t cb_fn;
+
+ u32 status;
+ u16 *response_code;
+ struct completion *completion;
};
#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd))
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 707bc15adec6..261f448f9de2 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -62,8 +62,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
u32 iq_no = (u32)txpciq.s.q_no;
u32 q_size;
struct cavium_wq *db_wq;
- int orig_node = dev_to_node(&oct->pci_dev->dev);
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
@@ -91,13 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
iq->oct_dev = oct;
- set_dev_node(&oct->pci_dev->dev, numa_node);
- iq->base_addr = lio_dma_alloc(oct, q_size,
- (dma_addr_t *)&iq->base_addr_dma);
- set_dev_node(&oct->pci_dev->dev, orig_node);
- if (!iq->base_addr)
- iq->base_addr = lio_dma_alloc(oct, q_size,
- (dma_addr_t *)&iq->base_addr_dma);
+ iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
if (!iq->base_addr) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
iq_no);
@@ -211,7 +204,7 @@ int octeon_setup_iq(struct octeon_device *oct,
void *app_ctx)
{
u32 iq_no = (u32)txpciq.s.q_no;
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (oct->instr_queue[iq_no]) {
dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 2fbaae96b505..3d691c69f74d 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -69,50 +69,53 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
u32 status;
u64 status64;
- struct octeon_instr_rdp *rdp;
- u64 rptr;
ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
do {
spin_lock_bh(&ordered_sc_list->lock);
- if (ordered_sc_list->head.next == &ordered_sc_list->head) {
+ if (list_empty(&ordered_sc_list->head)) {
spin_unlock_bh(&ordered_sc_list->lock);
return 1;
}
- sc = (struct octeon_soft_command *)ordered_sc_list->
- head.next;
- if (OCTEON_CN23XX_PF(octeon_dev) ||
- OCTEON_CN23XX_VF(octeon_dev)) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
- rptr = sc->cmd.cmd3.rptr;
- } else {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rptr = sc->cmd.cmd2.rptr;
- }
+ sc = list_first_entry(&ordered_sc_list->head,
+ struct octeon_soft_command, node);
status = OCTEON_REQUEST_PENDING;
/* check if octeon has finished DMA'ing a response
* to where rptr is pointing to
*/
- dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
- rptr, rdp->rlen,
- DMA_FROM_DEVICE);
status64 = *sc->status_word;
if (status64 != COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 32-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
if ((status64 & 0xff) != 0xff) {
octeon_swap_8B_data(&status64, 1);
if (((status64 & 0xff) != 0xff)) {
- status = (u32)(status64 &
- 0xffffffffULL);
+ /* retrieve 16-bit firmware status */
+ status = (u32)(status64 & 0xffffULL);
+ if (status) {
+ status =
+ FIRMWARE_STATUS_CODE(status);
+ } else {
+ /* i.e. no error */
+ status = OCTEON_REQUEST_DONE;
+ }
}
}
} else if (force_quit || (sc->timeout &&
time_after(jiffies, (unsigned long)sc->timeout))) {
+ dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
+ __func__, (long)jiffies, (long)sc->timeout);
status = OCTEON_REQUEST_TIMEOUT;
}
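The completion word starts life as COMPLETION_WORD_INIT (all 0xFF bytes); checking byte 0 of the raw value, byte-swapping, and re-checking the former byte 7 proves the whole 64-bit DMA write landed before the status is trusted. A standalone sketch of that decode, using swab64() in place of the driver's octeon_swap_8B_data() helper:

#include <linux/swab.h>

static u32 decode_status_word(u64 status64)
{
	if ((status64 & 0xff) == 0xff)
		return OCTEON_REQUEST_PENDING;	/* byte 0 not written yet */
	status64 = swab64(status64);		/* device order -> host order */
	if ((status64 & 0xff) == 0xff)
		return OCTEON_REQUEST_PENDING;	/* old byte 7 not written yet */
	if (status64 & 0xffff)			/* 16-bit firmware status */
		return FIRMWARE_STATUS_CODE(status64 & 0xffff);
	return OCTEON_REQUEST_DONE;		/* zero means success */
}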
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
index cbb2d84e8932..9169c2815dba 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.h
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -78,6 +78,8 @@ enum {
/*------------ Error codes used by host driver -----------------*/
#define DRIVER_MAJOR_ERROR_CODE 0x0000
+/*------ Error codes used by firmware (bits 15..0 set by firmware) -----*/
+#define FIRMWARE_MAJOR_ERROR_CODE 0x0001
/** A value of 0x00000000 indicates no error i.e. success */
#define DRIVER_ERROR_NONE 0x00000000
@@ -116,6 +118,9 @@ enum {
};
+#define FIRMWARE_STATUS_CODE(status) \
+ ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
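FIRMWARE_STATUS_CODE() tags a 16-bit firmware status with FIRMWARE_MAJOR_ERROR_CODE in the upper half, keeping firmware-reported errors (0x0001xxxx) distinguishable from host-driver errors (0x0000xxxx) in a single u32. For example:

u32 st = FIRMWARE_STATUS_CODE(0x0003);		/* -> 0x00010003 */
bool from_fw = (st >> 16) == FIRMWARE_MAJOR_ERROR_CODE;
u16 fw_code = st & 0xffff;			/* -> 0x0003 */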
/** Initialize the response lists. The number of response lists to create is
* given by count.
* @param octeon_dev - the octeon device structure.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 2269ff562d95..4a02e618e318 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -252,12 +252,14 @@ struct nicvf_drv_stats {
u64 tx_csum_overflow;
/* driver debug stats */
- u64 rcv_buffer_alloc_failures;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
+ u64 rcv_buffer_alloc_failures;
+ u64 page_alloc;
+
struct u64_stats_sync syncp;
};
@@ -266,9 +268,9 @@ struct nicvf {
struct net_device *netdev;
struct pci_dev *pdev;
void __iomem *reg_base;
+ struct bpf_prog *xdp_prog;
#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
- struct nicvf_cq_poll *napi[8];
void *iommu_domain;
u8 vf_id;
u8 sqs_id;
@@ -294,6 +296,7 @@ struct nicvf {
/* Queue count */
u8 rx_queues;
u8 tx_queues;
+ u8 xdp_tx_queues;
u8 max_queues;
u8 node;
@@ -318,10 +321,11 @@ struct nicvf {
struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;
+ /* Napi */
+ struct nicvf_cq_poll *napi[8];
+
/* MSI-X */
- bool msix_enabled;
u8 num_vec;
- struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 767234e2e8f9..fb770b0182d3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -65,9 +65,7 @@ struct nicpf {
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
- bool msix_enabled;
u8 num_vec;
- struct msix_entry *msix_entries;
bool irq_allocated[NIC_PF_MSIX_VECTORS];
char irq_name[NIC_PF_MSIX_VECTORS][20];
};
@@ -1088,7 +1086,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
u64 intr;
u8 vf, vf_per_mbx_reg = 64;
- if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+ if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
mbx = 0;
else
mbx = 1;
@@ -1107,51 +1105,13 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
return IRQ_HANDLED;
}
-static int nic_enable_msix(struct nicpf *nic)
-{
- int i, ret;
-
- nic->num_vec = pci_msix_vec_count(nic->pdev);
-
- nic->msix_entries = kmalloc_array(nic->num_vec,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!nic->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < nic->num_vec; i++)
- nic->msix_entries[i].entry = i;
-
- ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
- if (ret) {
- dev_err(&nic->pdev->dev,
- "Request for #%d msix vectors failed, returned %d\n",
- nic->num_vec, ret);
- kfree(nic->msix_entries);
- return ret;
- }
-
- nic->msix_enabled = 1;
- return 0;
-}
-
-static void nic_disable_msix(struct nicpf *nic)
-{
- if (nic->msix_enabled) {
- pci_disable_msix(nic->pdev);
- kfree(nic->msix_entries);
- nic->msix_enabled = 0;
- nic->num_vec = 0;
- }
-}
-
static void nic_free_all_interrupts(struct nicpf *nic)
{
int irq;
for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq])
- free_irq(nic->msix_entries[irq].vector, nic);
+ free_irq(pci_irq_vector(nic->pdev, irq), nic);
nic->irq_allocated[irq] = false;
}
}
@@ -1159,18 +1119,24 @@ static void nic_free_all_interrupts(struct nicpf *nic)
static int nic_register_interrupts(struct nicpf *nic)
{
int i, ret;
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
/* Enable MSI-X */
- ret = nic_enable_msix(nic);
- if (ret)
- return ret;
+ ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed, returned %d\n",
+ nic->num_vec, ret);
+ return 1;
+ }
/* Register mailbox interrupt handler */
for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
sprintf(nic->irq_name[i],
"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(nic->msix_entries[i].vector,
+ ret = request_irq(pci_irq_vector(nic->pdev, i),
nic_mbx_intr_handler, 0,
nic->irq_name[i], nic);
if (ret)
@@ -1186,14 +1152,16 @@ static int nic_register_interrupts(struct nicpf *nic)
fail:
dev_err(&nic->pdev->dev, "Request irq failed\n");
nic_free_all_interrupts(nic);
- nic_disable_msix(nic);
+ pci_free_irq_vectors(nic->pdev);
+ nic->num_vec = 0;
return ret;
}
static void nic_unregister_interrupts(struct nicpf *nic)
{
nic_free_all_interrupts(nic);
- nic_disable_msix(nic);
+ pci_free_irq_vectors(nic->pdev);
+ nic->num_vec = 0;
}
static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
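The hunks above replace the driver's hand-rolled msix_entry bookkeeping with the managed pci_alloc_irq_vectors()/pci_irq_vector() API; passing the same value as both min and max makes the allocation all-or-nothing. A condensed sketch of the pattern — 'my_handler' and 'priv' are placeholders:

int nvec = pci_msix_vec_count(pdev);
int err;

err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0)
	return err;
err = request_irq(pci_irq_vector(pdev, 0), my_handler, 0, "my-dev", priv);
if (err)
	pci_free_irq_vectors(pdev);
return err;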
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 02a986cdbb39..b9ece9cbf98b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -100,11 +100,12 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(tx_csum_overlap),
NICVF_DRV_STAT(tx_csum_overflow),
- NICVF_DRV_STAT(rcv_buffer_alloc_failures),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
+ NICVF_DRV_STAT(rcv_buffer_alloc_failures),
+ NICVF_DRV_STAT(page_alloc),
};
static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -720,7 +721,7 @@ static int nicvf_set_channels(struct net_device *dev,
struct nicvf *nic = netdev_priv(dev);
int err = 0;
bool if_up = netif_running(dev);
- int cqcount;
+ u8 cqcount, txq_count;
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
@@ -729,10 +730,26 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > nic->max_queues)
return -EINVAL;
+ if (nic->xdp_prog &&
+ ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
if (if_up)
nicvf_stop(dev);
- cqcount = max(channel->rx_count, channel->tx_count);
+ nic->rx_queues = channel->rx_count;
+ nic->tx_queues = channel->tx_count;
+ if (!nic->xdp_prog)
+ nic->xdp_tx_queues = 0;
+ else
+ nic->xdp_tx_queues = channel->rx_count;
+
+ txq_count = nic->xdp_tx_queues + nic->tx_queues;
+ cqcount = max(nic->rx_queues, txq_count);
if (cqcount > MAX_CMP_QUEUES_PER_QS) {
nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
@@ -741,12 +758,10 @@ static int nicvf_set_channels(struct net_device *dev,
nic->sqs_count = 0;
}
- nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
- nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
+ nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
- nic->rx_queues = channel->rx_count;
- nic->tx_queues = channel->tx_count;
err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
if (err)
return err;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 24017588f531..d6477af88085 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -17,6 +17,9 @@
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
#include "nic_reg.h"
#include "nic.h"
@@ -397,8 +400,10 @@ static void nicvf_request_sqs(struct nicvf *nic)
if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
- if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
- tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
+
+ tx_queues = nic->tx_queues + nic->xdp_tx_queues;
+ if (tx_queues > MAX_SND_QUEUES_PER_QS)
+ tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;
/* Set no of Rx/Tx queues in each of the SQsets */
for (sqs = 0; sqs < nic->sqs_count; sqs++) {
@@ -496,12 +501,99 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
+static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
+ struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
+ struct sk_buff **skb)
+{
+ struct xdp_buff xdp;
+ struct page *page;
+ u32 action;
+ u16 len, offset = 0;
+ u64 dma_addr, cpu_addr;
+ void *orig_data;
+
+ /* Retrieve packet buffer's DMA address and length */
+ len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
+ dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));
+
+ cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
+ if (!cpu_addr)
+ return false;
+ cpu_addr = (u64)phys_to_virt(cpu_addr);
+ page = virt_to_page((void *)cpu_addr);
+
+ xdp.data_hard_start = page_address(page);
+ xdp.data = (void *)cpu_addr;
+ xdp.data_end = xdp.data + len;
+ orig_data = xdp.data;
+
+ rcu_read_lock();
+ action = bpf_prog_run_xdp(prog, &xdp);
+ rcu_read_unlock();
+
+ /* Check if XDP program has changed headers */
+ if (orig_data != xdp.data) {
+ len = xdp.data_end - xdp.data;
+ offset = orig_data - xdp.data;
+ dma_addr -= offset;
+ }
+
+ switch (action) {
+ case XDP_PASS:
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+
+ /* Build SKB and pass on packet to network stack */
+ *skb = build_skb(xdp.data,
+ RCV_FRAG_LEN - cqe_rx->align_pad + offset);
+ if (!*skb)
+ put_page(page);
+ else
+ skb_put(*skb, len);
+ return false;
+ case XDP_TX:
+ nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(action);
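+ /* fall through */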
+ case XDP_ABORTED:
+ trace_xdp_exception(nic->netdev, prog, action);
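+ /* fall through */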
+ case XDP_DROP:
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ put_page(page);
+ return true;
+ }
+ return false;
+}
+
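nicvf_xdp_rx() above follows the usual XDP receive contract: build an xdp_buff over the DMA'ed frame, run the program under rcu_read_lock(), then act on the verdict (PASS hands the frame to the stack, TX bounces it back out, ABORTED/DROP consume it). The driver-independent core of that verdict handling as a sketch — the 'my_*' helpers are hypothetical driver callbacks:

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

static void my_xdp_tx(struct xdp_buff *xdp);	/* hypothetical */
static void my_recycle(struct xdp_buff *xdp);	/* hypothetical */

/* Returns true if the frame was consumed by XDP. */
static bool run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 action;

	rcu_read_lock();
	action = bpf_prog_run_xdp(prog, xdp);
	rcu_read_unlock();

	switch (action) {
	case XDP_PASS:
		return false;		/* hand the frame to the stack */
	case XDP_TX:
		my_xdp_tx(xdp);		/* bounce it back out the same port */
		return true;
	default:
		bpf_warn_invalid_xdp_action(action);
		/* fall through */
	case XDP_ABORTED:
	case XDP_DROP:
		my_recycle(xdp);	/* recycle or free the buffer */
		return true;
	}
}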
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
- int cqe_type, int budget,
+ int budget, int *subdesc_cnt,
unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
+ struct page *page;
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
@@ -513,12 +605,26 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
return;
- netdev_dbg(nic->netdev,
- "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
- __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
- cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+ /* Check for errors */
+ if (cqe_tx->send_status)
+ nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);
+
+ /* Is this an XDP-designated Tx queue? */
+ if (sq->is_xdp) {
+ page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
+ /* Check if it's a recycled page, else unmap the DMA mapping */
+ if (page && (page_ref_count(page) == 1))
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
+
+ /* Release page reference for recycling */
+ if (page)
+ put_page(page);
+ sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
+ return;
+ }
- nicvf_check_cqe_tx_errs(nic, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -528,12 +634,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
tso_sqe->subdesc_cnt);
- nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ *subdesc_cnt += tso_sqe->subdesc_cnt + 1;
} else {
nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
hdr->subdesc_cnt);
}
- nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
@@ -544,7 +650,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
* a SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
- nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
}
}
@@ -578,9 +684,9 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cqe_rx_t *cqe_rx)
+ struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
struct nicvf *snic = nic;
int err = 0;
@@ -595,16 +701,25 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
}
/* Check for errors */
- err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
- if (err && !cqe_rx->rb_cnt)
- return;
+ if (cqe_rx->err_level || cqe_rx->err_opcode) {
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+ }
- skb = nicvf_get_rcv_skb(snic, cqe_rx);
- if (!skb) {
- netdev_dbg(nic->netdev, "Packet not received\n");
- return;
+ /* For XDP, ignore pkts spanning multiple pages */
+ if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
+ /* Packet consumed by XDP */
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+ return;
+ } else {
+ skb = nicvf_get_rcv_skb(snic, cqe_rx,
+ nic->xdp_prog ? true : false);
}
+ if (!skb)
+ return;
+
if (netif_msg_pktdata(nic)) {
netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
skb, skb->len);
@@ -646,13 +761,14 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
{
int processed_cqe, work_done = 0, tx_done = 0;
int cqe_count, cqe_head;
+ int subdesc_cnt = 0;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
- struct snd_queue *sq;
- unsigned int tx_pkts = 0, tx_bytes = 0;
+ struct snd_queue *sq = &qs->sq[cq_idx];
+ unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
spin_lock_bh(&cq->lock);
loop:
@@ -667,8 +783,6 @@ loop:
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
cqe_head &= 0xFFFF;
- netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
- __func__, cq_idx, cqe_count, cqe_head);
while (processed_cqe < cqe_count) {
/* Get the CQ descriptor */
cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -682,17 +796,15 @@ loop:
break;
}
- netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
- cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
work_done++;
break;
case CQE_TYPE_SEND:
- nicvf_snd_pkt_handler(netdev,
- (void *)cq_desc, CQE_TYPE_SEND,
- budget, &tx_pkts, &tx_bytes);
+ nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
+ budget, &subdesc_cnt,
+ &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -704,9 +816,6 @@ loop:
}
processed_cqe++;
}
- netdev_dbg(nic->netdev,
- "%s CQ%d processed_cqe %d work_done %d budget %d\n",
- __func__, cq_idx, processed_cqe, work_done, budget);
/* Ring doorbell to inform H/W to reuse processed CQEs */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -716,13 +825,26 @@ loop:
goto loop;
done:
+ /* Update SQ's descriptor free count */
+ if (subdesc_cnt)
+ nicvf_put_sq_desc(sq, subdesc_cnt);
+
+ txq_idx = nicvf_netdev_qidx(nic, cq_idx);
+ /* Handle XDP TX queues */
+ if (nic->pnicvf->xdp_prog) {
+ if (txq_idx < nic->pnicvf->xdp_tx_queues) {
+ nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
+ goto out;
+ }
+ nic = nic->pnicvf;
+ txq_idx -= nic->pnicvf->xdp_tx_queues;
+ }
+
/* Wakeup TXQ if its stopped earlier due to SQ full */
- sq = &nic->qs->sq[cq_idx];
if (tx_done ||
(atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
netdev = nic->pnicvf->netdev;
- txq = netdev_get_tx_queue(netdev,
- nicvf_netdev_qidx(nic, cq_idx));
+ txq = netdev_get_tx_queue(netdev, txq_idx);
if (tx_pkts)
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -735,10 +857,11 @@ done:
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit queue wakeup SQ%d\n",
- netdev->name, cq_idx);
+ netdev->name, txq_idx);
}
}
+out:
spin_unlock_bh(&cq->lock);
return work_done;
}
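With XDP attached, the first xdp_tx_queues CQs complete XDP transmissions and only need their doorbell rung, while the stack's Tx queues are numbered from zero; hence the rebasing of txq_idx above. For instance:

/* Hypothetical standalone values: with 4 XDP Tx queues, CQ 5
 * completes stack Tx queue 1.
 */
unsigned int cq_idx = 5, xdp_tx_queues = 4;
unsigned int stack_txq = cq_idx - xdp_tx_queues;	/* == 1 */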
@@ -882,38 +1005,9 @@ static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
return IRQ_HANDLED;
}
-static int nicvf_enable_msix(struct nicvf *nic)
-{
- int ret, vec;
-
- nic->num_vec = NIC_VF_MSIX_VECTORS;
-
- for (vec = 0; vec < nic->num_vec; vec++)
- nic->msix_entries[vec].entry = vec;
-
- ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
- if (ret) {
- netdev_err(nic->netdev,
- "Req for #%d msix vectors failed\n", nic->num_vec);
- return 0;
- }
- nic->msix_enabled = 1;
- return 1;
-}
-
-static void nicvf_disable_msix(struct nicvf *nic)
-{
- if (nic->msix_enabled) {
- pci_disable_msix(nic->pdev);
- nic->msix_enabled = 0;
- nic->num_vec = 0;
- }
-}
-
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
int vec, cpu;
- int irqnum;
for (vec = 0; vec < nic->num_vec; vec++) {
if (!nic->irq_allocated[vec])
@@ -930,15 +1024,14 @@ static void nicvf_set_irq_affinity(struct nicvf *nic)
cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
nic->affinity_mask[vec]);
- irqnum = nic->msix_entries[vec].vector;
- irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
+ irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
+ nic->affinity_mask[vec]);
}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
int irq, ret = 0;
- int vector;
for_each_cq_irq(irq)
sprintf(nic->irq_name[irq], "%s-rxtx-%d",
@@ -957,8 +1050,8 @@ static int nicvf_register_interrupts(struct nicvf *nic)
/* Register CQ interrupts */
for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
- vector = nic->msix_entries[irq].vector;
- ret = request_irq(vector, nicvf_intr_handler,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_intr_handler,
0, nic->irq_name[irq], nic->napi[irq]);
if (ret)
goto err;
@@ -968,8 +1061,8 @@ static int nicvf_register_interrupts(struct nicvf *nic)
/* Register RBDR interrupt */
for (irq = NICVF_INTR_ID_RBDR;
irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
- vector = nic->msix_entries[irq].vector;
- ret = request_irq(vector, nicvf_rbdr_intr_handler,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_rbdr_intr_handler,
0, nic->irq_name[irq], nic);
if (ret)
goto err;
@@ -981,7 +1074,7 @@ static int nicvf_register_interrupts(struct nicvf *nic)
nic->pnicvf->netdev->name,
nic->sqs_mode ? (nic->sqs_id + 1) : 0);
irq = NICVF_INTR_ID_QS_ERR;
- ret = request_irq(nic->msix_entries[irq].vector,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
nicvf_qs_err_intr_handler,
0, nic->irq_name[irq], nic);
if (ret)
@@ -1001,6 +1094,7 @@ err:
static void nicvf_unregister_interrupts(struct nicvf *nic)
{
+ struct pci_dev *pdev = nic->pdev;
int irq;
/* Free registered interrupts */
@@ -1008,19 +1102,20 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
if (!nic->irq_allocated[irq])
continue;
- irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
+ irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
free_cpumask_var(nic->affinity_mask[irq]);
if (irq < NICVF_INTR_ID_SQ)
- free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+ free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
else
- free_irq(nic->msix_entries[irq].vector, nic);
+ free_irq(pci_irq_vector(pdev, irq), nic);
nic->irq_allocated[irq] = false;
}
/* Disable MSI-X */
- nicvf_disable_msix(nic);
+ pci_free_irq_vectors(pdev);
+ nic->num_vec = 0;
}
/* Initialize MSIX vectors and register MISC interrupt.
@@ -1032,16 +1127,22 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
int irq = NICVF_INTR_ID_MISC;
/* Return if mailbox interrupt is already registered */
- if (nic->msix_enabled)
+ if (nic->pdev->msix_enabled)
return 0;
/* Enable MSI-X */
- if (!nicvf_enable_msix(nic))
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+ ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
return 1;
+ }
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
/* Register Misc interrupt */
- ret = request_irq(nic->msix_entries[irq].vector,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
if (ret)
@@ -1076,6 +1177,13 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+ /* In the XDP case, the initial HW Tx queues are used for XDP,
+ * but the stack's queue mapping starts at '0', so skip the
+ * Tx queues attached to Rx queues for XDP.
+ */
+ if (nic->xdp_prog)
+ qid += nic->xdp_tx_queues;
+
snic = nic;
/* Get secondary Qset's SQ structure */
if (qid >= MAX_SND_QUEUES_PER_QS) {
@@ -1164,7 +1272,7 @@ int nicvf_stop(struct net_device *netdev)
/* Wait for pending IRQ handlers to finish */
for (irq = 0; irq < nic->num_vec; irq++)
- synchronize_irq(nic->msix_entries[irq].vector);
+ synchronize_irq(pci_irq_vector(nic->pdev, irq));
tasklet_kill(&nic->rbdr_task);
tasklet_kill(&nic->qs_err_task);
@@ -1365,7 +1473,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- if (nic->msix_enabled) {
+ if (nic->pdev->msix_enabled) {
if (nicvf_hw_set_mac_addr(nic, netdev))
return -EBUSY;
} else {
@@ -1553,6 +1661,114 @@ static int nicvf_set_features(struct net_device *netdev,
return 0;
}
+static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
+{
+ u8 cq_count, txq_count;
+
+ /* Set XDP Tx queue count same as Rx queue count */
+ if (!bpf_attached)
+ nic->xdp_tx_queues = 0;
+ else
+ nic->xdp_tx_queues = nic->rx_queues;
+
+ /* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
+ * need to be allocated; check how many.
+ */
+ txq_count = nic->xdp_tx_queues + nic->tx_queues;
+ cq_count = max(nic->rx_queues, txq_count);
+ if (cq_count > MAX_CMP_QUEUES_PER_QS) {
+ nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
+
+ /* Set primary Qset's resources */
+ nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
+ nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ /* Update stack */
+ nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
+}
+
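A worked example of the qset sizing above, assuming MAX_CMP_QUEUES_PER_QS == 8 and the kernel's max()/roundup() macro semantics — with 12 Rx queues and XDP attached:

unsigned int rx = 12, txq_count = 12 + 12;	/* XDP TXQs + stack TXQs */
unsigned int cq = max(rx, txq_count);		/* 24 CQs needed */
unsigned int sqs = roundup(cq, 8) / 8 - 1;	/* 2 secondary qsets */

so two secondary qsets are requested besides the primary one.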
+static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
+{
+ struct net_device *dev = nic->netdev;
+ bool if_up = netif_running(nic->netdev);
+ struct bpf_prog *old_prog;
+ bool bpf_attached = false;
+
+ /* For now, support only the usual MTU-sized frames */
+ if (prog && (dev->mtu > 1500)) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ dev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+ /* All SQs attached to CQs (i.e. as many as there are RQs) are
+ * treated as XDP Tx queues, and additional Tx queues are
+ * allocated for the network stack to send packets out.
+ *
+ * The number of stack Tx queues is either the same as the Rx
+ * queue count or whatever is left within the maximum possible
+ * queue count.
+ */
+ if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
+ netdev_warn(dev,
+ "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -ENOMEM;
+ }
+
+ if (if_up)
+ nicvf_stop(nic->netdev);
+
+ old_prog = xchg(&nic->xdp_prog, prog);
+ /* Detach old prog, if any */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (nic->xdp_prog) {
+ /* Attach BPF program */
+ nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
+ if (!IS_ERR(nic->xdp_prog))
+ bpf_attached = true;
+ }
+
+ /* Calculate Tx queues needed for XDP and network stack */
+ nicvf_set_xdp_queues(nic, bpf_attached);
+
+ if (if_up) {
+ /* Reinitialize interface, clean slate */
+ nicvf_open(nic->netdev);
+ netif_trans_update(nic->netdev);
+ }
+
+ return 0;
+}
+
+static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ /* To avoid checks while retrieving buffer address from CQE_RX,
+ * do not support XDP on T88 pass1.x silicons, which are not
+ * widely in use anyway.
+ */
+ if (pass1_silicon(nic->pdev))
+ return -EOPNOTSUPP;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return nicvf_xdp_setup(nic, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!nic->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1563,6 +1779,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_tx_timeout = nicvf_tx_timeout,
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
+ .ndo_xdp = nicvf_xdp,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1665,8 +1882,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_unregister_interrupts;
- netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_GRO |
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_RX);
netdev->hw_features |= NETIF_F_RXHASH;
@@ -1674,7 +1892,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK;
- netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index f13289f0d238..2b181762ad49 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -19,16 +19,8 @@
#include "q_struct.h"
#include "nicvf_queues.h"
-#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
-
-static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
-{
- /* Translation is installed only when IOMMU is present */
- if (nic->iommu_domain)
- return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
- return dma_addr;
-}
-
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data);
static void nicvf_get_page(struct nicvf *nic)
{
if (!nic->rb_pageref || !nic->rb_page)
@@ -90,46 +82,152 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
dmem->base = NULL;
}
-/* Allocate buffer for packet reception
- * HW returns memory address where packet is DMA'ed but not a pointer
- * into RBDR ring, so save buffer address at the start of fragment and
- * align the start address to a cache aligned address
+#define XDP_PAGE_REFCNT_REFILL 256
+
+/* Allocate a new page or recycle one if possible
+ *
+ * We cannot optimize dma mapping here, since
+ * We cannot optimize DMA mapping here, since:
+ * 1. There is only one RBDR ring for 8 Rx queues.
+ * 2. CQE_RX gives the address of the buffer the packet was DMA'ed to,
+ *    not an index into the RBDR ring, so saved info can't be looked up.
+ * 3. There are multiple receive buffers per page.
-static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
- u32 buf_len, u64 **rbuf)
+static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
+ struct rbdr *rbdr, gfp_t gfp)
+{
+ int ref_count;
+ struct page *page = NULL;
+ struct pgcache *pgcache, *next;
+
+ /* Check if page is already allocated */
+ pgcache = &rbdr->pgcache[rbdr->pgidx];
+ page = pgcache->page;
+ /* Check if page can be recycled */
+ if (page) {
+ ref_count = page_ref_count(page);
+ /* Check if this page has been used once i.e 'put_page'
+ /* Check if this page has been used once, i.e. 'put_page'
+ * was called after packet transmission; the internal ref_count
+ * and the page's ref_count are then equal, so the page can be
+ * recycled.
+ if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
+ pgcache->ref_count--;
+ else
+ page = NULL;
+
+ /* In non-XDP mode, page's ref_count needs to be '1' for it
+ * to be recycled.
+ */
+ if (!rbdr->is_xdp && (ref_count != 1))
+ page = NULL;
+ }
+
+ if (!page) {
+ page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
+ if (!page)
+ return NULL;
+
+ this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
+
+ /* Check for space */
+ if (rbdr->pgalloc >= rbdr->pgcnt) {
+ /* Page can still be used */
+ nic->rb_page = page;
+ return NULL;
+ }
+
+ /* Save the page in page cache */
+ pgcache->page = page;
+ pgcache->dma_addr = 0;
+ pgcache->ref_count = 0;
+ rbdr->pgalloc++;
+ }
+
+ /* Take additional page references for recycling */
+ if (rbdr->is_xdp) {
+ /* Since there is a single RBDR (i.e. a single core doing
+ * page recycling) per 8 Rx queues, in XDP mode adjusting
+ * page references atomically is the biggest bottleneck, so
+ * take a bunch of references at a time.
+ *
+ * Hence the reference counts below differ by '1'.
+ */
+ if (!pgcache->ref_count) {
+ pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
+ page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
+ }
+ } else {
+ /* In the non-XDP case, a single 64K page is divided across
+ * multiple receive buffers, so the cost of recycling is lower
+ * anyway. We can make do with just one extra reference.
+ */
+ page_ref_add(page, 1);
+ }
+
+ rbdr->pgidx++;
+ rbdr->pgidx &= (rbdr->pgcnt - 1);
+
+ /* Prefetch refcount of next page in page cache */
+ next = &rbdr->pgcache[rbdr->pgidx];
+ page = next->page;
+ if (page)
+ prefetch(&page->_refcount);
+
+ return pgcache;
+}
+
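The XDP branch above amortizes the refcount cost: with one RBDR feeding up to 8 Rx queues, per-buffer atomic refcount updates dominate the XDP hot path, so XDP_PAGE_REFCNT_REFILL (256) references are taken in one shot and then handed out from a local counter. The idea in isolation — names and batch size are illustrative:

#include <linux/mm.h>

#define REFILL_BATCH 256

struct cached_page {
	struct page *page;
	int local_refs;		/* references we still own locally */
};

static void cached_page_take_ref(struct cached_page *c)
{
	if (!c->local_refs) {
		page_ref_add(c->page, REFILL_BATCH);	/* one atomic op */
		c->local_refs = REFILL_BATCH;
	}
	c->local_refs--;	/* cheap, no atomic needed on this path */
}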
+/* Allocate buffer for packet reception */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
+ gfp_t gfp, u32 buf_len, u64 *rbuf)
{
- int order = NICVF_PAGE_ORDER;
+ struct pgcache *pgcache = NULL;
- /* Check if request can be accomodated in previous allocated page */
- if (nic->rb_page &&
- ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
+ /* Check if request can be accommodated in the previously allocated page.
+ * But in XDP mode only one buffer per page is permitted.
+ */
+ if (!rbdr->is_xdp && nic->rb_page &&
+ ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
nic->rb_pageref++;
goto ret;
}
nicvf_get_page(nic);
+ nic->rb_page = NULL;
- /* Allocate a new page */
- nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- order);
- if (!nic->rb_page) {
+ /* Get new page, either recycled or new one */
+ pgcache = nicvf_alloc_page(nic, rbdr, gfp);
+ if (!pgcache && !nic->rb_page) {
this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
return -ENOMEM;
}
+
nic->rb_page_offset = 0;
+
+ /* Reserve space for header modifications by BPF program */
+ if (rbdr->is_xdp)
+ buf_len += XDP_PACKET_HEADROOM;
+
+ /* Check if it's recycled */
+ if (pgcache)
+ nic->rb_page = pgcache->page;
ret:
- /* HW will ensure data coherency, CPU sync not required */
- *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
+ *rbuf = pgcache->dma_addr;
+ } else {
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
nic->rb_page_offset, buf_len,
DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC));
- if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
- if (!nic->rb_page_offset)
- __free_pages(nic->rb_page, order);
- nic->rb_page = NULL;
- return -ENOMEM;
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, 0);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
+ if (pgcache)
+ pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ nic->rb_page_offset += buf_len;
}
- nic->rb_page_offset += buf_len;
return 0;
}
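The mapping above passes DMA_ATTR_SKIP_CPU_SYNC because, as the in-line comment notes, the hardware guarantees coherency for these buffers; on a mapping error, only a page that no earlier buffer uses (offset 0) is freed. The map-with-attrs pattern in condensed form — 'dev', 'page', 'offset' and 'buf_len' are placeholders:

dma_addr_t daddr = dma_map_page_attrs(dev, page, offset, buf_len,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(dev, daddr)) {
	if (!offset)			/* page holds no earlier buffers */
		__free_pages(page, 0);
	return -ENOMEM;
}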
@@ -159,7 +257,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
int ring_len, int buf_size)
{
int idx;
- u64 *rbuf;
+ u64 rbuf;
struct rbdr_entry_t *desc;
int err;
@@ -177,10 +275,34 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->head = 0;
rbdr->tail = 0;
+ /* Initialize page recycling state.
+ *
+ * A single buffer per page can't be used, especially with 64K
+ * pages: on embedded platforms (i.e. 81xx/83xx) available memory
+ * is low and the minimum RBDR ring size is 8K, which would take
+ * away a lot of memory.
+ *
+ * But for XDP it has to be a single buffer per page.
+ */
+ if (!nic->pnicvf->xdp_prog) {
+ rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+ rbdr->is_xdp = false;
+ } else {
+ rbdr->pgcnt = ring_len;
+ rbdr->is_xdp = true;
+ }
+ rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
+ rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
+ rbdr->pgcnt, GFP_KERNEL);
+ if (!rbdr->pgcache)
+ return -ENOMEM;
+ rbdr->pgidx = 0;
+ rbdr->pgalloc = 0;
+
nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
- err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
- &rbuf);
+ err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
+ RCV_FRAG_LEN, &rbuf);
if (err) {
/* To free already allocated and mapped ones */
rbdr->tail = idx - 1;
@@ -188,7 +310,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
}
desc = GET_RBDR_DESC(rbdr, idx);
- desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
}
nicvf_get_page(nic);
@@ -201,6 +323,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
u64 buf_addr, phys_addr;
+ struct pgcache *pgcache;
struct rbdr_entry_t *desc;
if (!rbdr)
@@ -216,7 +339,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
/* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
- buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ buf_addr = desc->buf_addr;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
@@ -227,13 +350,31 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
}
/* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
- buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ buf_addr = desc->buf_addr;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (phys_addr)
put_page(virt_to_page(phys_to_virt(phys_addr)));
+ /* Sync page cache info */
+ smp_rmb();
+
+ /* Release additional page references held for recycling */
+ head = 0;
+ while (head < rbdr->pgcnt) {
+ pgcache = &rbdr->pgcache[head];
+ if (pgcache->page && page_ref_count(pgcache->page) != 0) {
+ /* XDP pages hold batched extra references; drop them
+ * before the final put_page(). Always fall through to
+ * head++ so the loop terminates.
+ */
+ if (rbdr->is_xdp)
+ page_ref_sub(pgcache->page, pgcache->ref_count - 1);
+ put_page(pgcache->page);
+ }
+ head++;
+ }
+
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
@@ -248,7 +389,7 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
int refill_rb_cnt;
struct rbdr *rbdr;
struct rbdr_entry_t *desc;
- u64 *rbuf;
+ u64 rbuf;
int new_rb = 0;
refill:
@@ -269,17 +410,20 @@ refill:
else
refill_rb_cnt = qs->rbdr_len - qcount - 1;
+ /* Sync page cache info */
+ smp_rmb();
+
/* Start filling descs from tail */
tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
while (refill_rb_cnt) {
tail++;
tail &= (rbdr->dmem.q_len - 1);
- if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+ if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
break;
desc = GET_RBDR_DESC(rbdr, tail);
- desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
refill_rb_cnt--;
new_rb++;
}
@@ -362,7 +506,7 @@ static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
- struct snd_queue *sq, int q_len)
+ struct snd_queue *sq, int q_len, int qidx)
{
int err;
@@ -375,17 +519,38 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
if (!sq->skbuff)
return -ENOMEM;
+
sq->head = 0;
sq->tail = 0;
- atomic_set(&sq->free_cnt, q_len - 1);
sq->thresh = SND_QUEUE_THRESH;
- /* Preallocate memory for TSO segment's header */
- sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
- q_len * TSO_HEADER_SIZE,
- &sq->tso_hdrs_phys, GFP_KERNEL);
- if (!sq->tso_hdrs)
- return -ENOMEM;
+ /* Check if this SQ is an XDP Tx queue */
+ if (nic->sqs_mode)
+ qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
+ if (qidx < nic->pnicvf->xdp_tx_queues) {
+ /* Alloc memory to save page pointers for XDP_TX */
+ sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->xdp_page)
+ return -ENOMEM;
+ sq->xdp_desc_cnt = 0;
+ sq->xdp_free_cnt = q_len - 1;
+ sq->is_xdp = true;
+ } else {
+ sq->xdp_page = NULL;
+ sq->xdp_desc_cnt = 0;
+ sq->xdp_free_cnt = 0;
+ sq->is_xdp = false;
+
+ atomic_set(&sq->free_cnt, q_len - 1);
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys,
+ GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+ }
return 0;
}
@@ -411,6 +576,7 @@ void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
+ struct page *page;
struct sq_hdr_subdesc *hdr;
struct sq_hdr_subdesc *tso_sqe;
@@ -428,8 +594,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
- if (!skb)
+ page = sq->xdp_page ?
+ (struct page *)sq->xdp_page[sq->head] : NULL;
+ /* Skip only if there is neither an skb nor an XDP page,
+ * else a pending skb on a non-XDP queue would leak.
+ */
+ if (!skb && !page)
 goto next;
+ if (page)
+ put_page(page);
+
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
/* Check for dummy descriptor used for HW TSO offload on 88xx */
if (hdr->dont_send) {
@@ -442,12 +615,14 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
nicvf_unmap_sndq_buffers(nic, sq, sq->head,
hdr->subdesc_cnt);
}
- dev_kfree_skb_any(skb);
+ if (skb)
+ dev_kfree_skb_any(skb);
next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
kfree(sq->skbuff);
+ kfree(sq->xdp_page);
nicvf_free_q_desc_mem(nic, &sq->dmem);
}
@@ -838,7 +1013,7 @@ static int nicvf_alloc_resources(struct nicvf *nic)
/* Alloc send queue */
for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
- if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
goto alloc_fail;
}
@@ -876,6 +1051,7 @@ int nicvf_set_qset_resources(struct nicvf *nic)
nic->rx_queues = qs->rq_cnt;
nic->tx_queues = qs->sq_cnt;
+ nic->xdp_tx_queues = 0;
return 0;
}
@@ -940,7 +1116,10 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
int qentry;
qentry = sq->tail;
- atomic_sub(desc_cnt, &sq->free_cnt);
+ if (!sq->is_xdp)
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ else
+ sq->xdp_free_cnt -= desc_cnt;
sq->tail += desc_cnt;
sq->tail &= (sq->dmem.q_len - 1);
@@ -958,7 +1137,10 @@ static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
- atomic_add(desc_cnt, &sq->free_cnt);
+ if (!sq->is_xdp)
+ atomic_add(desc_cnt, &sq->free_cnt);
+ else
+ sq->xdp_free_cnt += desc_cnt;
sq->head += desc_cnt;
sq->head &= (sq->dmem.q_len - 1);
}
@@ -1016,6 +1198,58 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
}
}
+/* XDP Transmit APIs */
+void nicvf_xdp_sq_doorbell(struct nicvf *nic,
+ struct snd_queue *sq, int sq_num)
+{
+ if (!sq->xdp_desc_cnt)
+ return;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ wmb();
+
+ /* Inform HW to xmit all queued XDP descriptors */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, sq->xdp_desc_cnt);
+ sq->xdp_desc_cnt = 0;
+}
+
+static inline void
+nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, u64 data, int len)
+{
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+ hdr->post_cqe = 1;
+ sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
+}
+
+int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
+ u64 bufaddr, u64 dma_addr, u16 len)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+ int qentry;
+
+ if (subdesc_cnt > sq->xdp_free_cnt)
+ return 0;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);
+
+ sq->xdp_desc_cnt += subdesc_cnt;
+
+ return 1;
+}
+
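The XDP transmit API above batches doorbells: each forwarded frame consumes a HEADER plus a GATHER subdescriptor, and a single NIC_QSET_SQ_0_7_DOOR write at the end of a poll pass publishes everything queued. A usage sketch inside a NAPI loop — 'frames_left', 'next_frame' and 'struct my_frame' are hypothetical helpers:

while (frames_left(cq)) {
	struct my_frame f = next_frame(cq);	/* hypothetical type */

	if (!nicvf_xdp_sq_append_pkt(nic, sq, f.vaddr, f.dma, f.len))
		break;				/* SQ full, stop queuing */
}
nicvf_xdp_sq_doorbell(nic, sq, sq_num);		/* one MMIO for the batch */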
/* Calculate no of SQ subdescriptors needed to transmit all
* segments of this TSO packet.
* Taken from 'Tilera network driver' with a minor modification.
@@ -1094,7 +1328,13 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
{
int proto;
struct sq_hdr_subdesc *hdr;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ ip.hdr = skb_network_header(skb);
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
@@ -1119,7 +1359,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
- proto = ip_hdr(skb)->protocol;
+ proto = (ip.v4->version == 4) ? ip.v4->protocol :
+ ip.v6->nexthdr;
+
switch (proto) {
case IPPROTO_TCP:
hdr->csum_l4 = SEND_L4_CSUM_TCP;
@@ -1366,8 +1608,33 @@ static inline unsigned frag_num(unsigned i)
#endif
}
+static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
+ u64 buf_addr, bool xdp)
+{
+ struct page *page = NULL;
+ int len = RCV_FRAG_LEN;
+
+ if (xdp) {
+ page = virt_to_page(phys_to_virt(buf_addr));
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) != 1)
+ return;
+
+ len += XDP_PACKET_HEADROOM;
+ /* Receive buffers in XDP mode are mapped from page start */
+ dma_addr &= PAGE_MASK;
+ }
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
/* Returns SKB for a received packet */
-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp)
{
int frag;
int payload_len = 0;
@@ -1402,10 +1669,9 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
if (!frag) {
/* First fragment */
- dma_unmap_page_attrs(&nic->pdev->dev,
- *rb_ptrs - cqe_rx->align_pad,
- RCV_FRAG_LEN, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ nicvf_unmap_rcv_buffer(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ phys_addr, xdp);
skb = nicvf_rb_ptr_to_skb(nic,
phys_addr - cqe_rx->align_pad,
payload_len);
@@ -1415,9 +1681,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
- RCV_FRAG_LEN, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
page = virt_to_page(phys_to_virt(phys_addr));
offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
@@ -1547,9 +1811,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- if (!cqe_rx->err_level && !cqe_rx->err_opcode)
- return 0;
-
if (netif_msg_rx_err(nic))
netdev_err(nic->netdev,
"%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
@@ -1638,8 +1899,6 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
switch (cqe_tx->send_status) {
- case CQ_TX_ERROP_GOOD:
- return 0;
case CQ_TX_ERROP_DESC_FAULT:
this_cpu_inc(nic->drv_stats->tx_desc_fault);
break;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 10cb4b84625b..57858522c33c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -10,6 +10,7 @@
#define NICVF_QUEUES_H
#include <linux/netdevice.h>
+#include <linux/iommu.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
@@ -213,6 +214,12 @@ struct q_desc_mem {
void *unalign_base;
};
+struct pgcache {
+ struct page *page;
+ int ref_count;
+ u64 dma_addr;
+};
+
struct rbdr {
bool enable;
u32 dma_size;
@@ -222,6 +229,13 @@ struct rbdr {
u32 head;
u32 tail;
struct q_desc_mem dmem;
+ bool is_xdp;
+
+ /* For page recycling */
+ int pgidx;
+ int pgcnt;
+ int pgalloc;
+ struct pgcache *pgcache;
} ____cacheline_aligned_in_smp;
struct rcv_queue {
@@ -258,6 +272,10 @@ struct snd_queue {
u32 tail;
u64 *skbuff;
void *desc;
+ u64 *xdp_page;
+ u16 xdp_desc_cnt;
+ u16 xdp_free_cnt;
+ bool is_xdp;
#define TSO_HEADER_SIZE 128
/* For TSO segment's header */
@@ -301,6 +319,14 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (nic->iommu_domain)
+ return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+ return dma_addr;
+}
+
void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
@@ -318,8 +344,12 @@ void nicvf_sq_free_used_descs(struct net_device *netdev,
struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num);
+int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
+ u64 bufaddr, u64 dma_addr, u16 len);
+void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
index f36347237a54..e47205aa87ea 100644
--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -359,15 +359,7 @@ union cq_desc_t {
};
struct rbdr_entry_t {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 rsvd0:15;
- u64 buf_addr:42;
- u64 cache_align:7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 cache_align:7;
- u64 buf_addr:42;
- u64 rsvd0:15;
-#endif
+ u64 buf_addr;
};
/* TCP reassembly context */
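Since CN88xx receive buffers are always cache-line aligned, the shifted 42-bit bitfield in rbdr_entry_t bought nothing; storing the full 64-bit IOVA and masking off the low alignment bits, as the 'rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1)' stores above do, is simpler and cheaper. For example, assuming NICVF_RCV_BUF_ALIGN_BYTES == 128 (a power of two):

u64 rbuf = 0x0000ab00cd47ULL;
u64 buf_addr = rbuf & ~(128ULL - 1);	/* -> 0x0000ab00cd00 */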
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 6916c62f2487..94b9482f14a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -223,7 +223,6 @@ struct port_info {
struct cmac *mac;
struct cphy *phy;
struct link_config link_config;
- struct net_device_stats netstats;
};
struct sge;
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d8aff7a4b3c7..8623be13bf86 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -296,7 +296,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
- struct net_device_stats *ns = &p->netstats;
+ struct net_device_stats *ns = &dev->stats;
const struct cmac_statistics *pstats;
/* Do a full update of the MAC stats */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 8b395b537330..087ff0ffb597 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -72,7 +72,6 @@ struct port_info {
struct cphy phy;
struct cmac mac;
struct link_config link_config;
- struct net_device_stats netstats;
int activity;
__be32 iscsi_ipv4addr;
struct iscsi_config iscsic;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index d76491676b51..2ff6bd139c96 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1489,7 +1489,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- struct net_device_stats *ns = &pi->netstats;
+ struct net_device_stats *ns = &dev->stats;
const struct mac_stats *pstats;
spin_lock(&adapter->stats_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6faaca1b48b3..c12c4a3b82b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2338,6 +2338,10 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->locked = 1;
f->fs.rpttid = 1;
+ /* Save the actual tid. We need this to get the corresponding
+ * filter entry structure in filter_rpl.
+ */
+ f->tid = stid + adap->tids.ftid_base;
ret = set_filter_wr(adap, stid);
if (ret) {
clear_filter(adap, f);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 87000cd39737..0de8eb72325c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6369,7 +6369,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- unsigned int ingpad;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
HOSTPAGESIZEPF0_V(sge_hps) |
@@ -6389,6 +6388,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
INGPADBOUNDARY_SHIFT_X) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
} else {
+ unsigned int pack_align;
+ unsigned int ingpad, ingpack;
+ unsigned int pcie_cap;
+
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
* Boundary to avoid uselessly chewing up PCIe Link and Memory
@@ -6401,27 +6404,62 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* Size (the minimum unit of transfer to/from Memory). If we
* have a Padding Boundary which is smaller than the Memory
* Line Size, that'll involve a Read-Modify-Write cycle on the
- * Memory Controller which is never good. For T5 the smallest
- * Padding Boundary which we can select is 32 bytes which is
- * larger than any known Memory Controller Line Size so we'll
- * use that.
- *
- * T5 has a different interpretation of the "0" value for the
- * Packing Boundary. This corresponds to 16 bytes instead of
- * the expected 32 bytes. We never have a Packing Boundary
- * less than 32 bytes so we can't use that special value but
- * on the other hand, if we wanted 32 bytes, the best we can
- * really do is 64 bytes.
- */
- if (fl_align <= 32) {
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ unsigned int mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ pci_read_config_word(adap->pdev,
+ pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /* N.B. T5/T6 have a crazy special interpretation of the "0"
+ * value for the Packing Boundary. This corresponds to 16
+ * bytes instead of the expected 32 bytes. So if we want 32
+ * bytes, the best we can really do is 64 bytes ...
+ */
+ if (pack_align <= 16) {
+ ingpack = INGPACKBOUNDARY_16B_X;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = INGPACKBOUNDARY_64B_X;
fl_align = 64;
- fl_align_log = 6;
+ } else {
+ unsigned int pack_align_log = fls(pack_align) - 1;
+
+ ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
+ fl_align = pack_align;
}
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
if (is_t5(adap->params.chip))
- ingpad = INGPCIEBOUNDARY_32B_X;
+ ingpad = INGPADBOUNDARY_32B_X;
else
- ingpad = T6_INGPADBOUNDARY_32B_X;
+ ingpad = T6_INGPADBOUNDARY_8B_X;
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -6430,8 +6468,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
- INGPACKBOUNDARY_V(fl_align_log -
- INGPACKBOUNDARY_SHIFT_X));
+ INGPACKBOUNDARY_V(ingpack));
}
/*
* Adjust various SGE Free List Host Buffer Sizes.
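For reference, the Maximum Payload Size decode in the hunk above boils down to the following standalone sketch (the helper name is made up for illustration; the register layout is from the PCIe spec):

static unsigned int example_pcie_mps(struct pci_dev *pdev, int pcie_cap)
{
	u16 devctl;

	/* PCI_EXP_DEVCTL_PAYLOAD is bits 7:5; the field encodes the MPS
	 * as a power of 2 starting at 128 bytes, hence the "+ 7".
	 */
	pci_read_config_word(pdev, pcie_cap + PCI_EXP_DEVCTL, &devctl);
	return 1U << (((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7);
}

A field value of 0 thus yields 128 bytes and 2 yields 512 bytes; pack_align is then rounded up to that size so the Packing Boundary stays a multiple of the MPS.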
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 36cf3073ca37..f6558cbfc54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -54,11 +54,15 @@
#define INGPADBOUNDARY_SHIFT_X 5
#define T6_INGPADBOUNDARY_SHIFT_X 3
+#define T6_INGPADBOUNDARY_8B_X 0
#define T6_INGPADBOUNDARY_32B_X 2
+#define INGPADBOUNDARY_32B_X 0
+
/* CONTROL2 register */
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_64B_X 1
/* GTS register */
#define SGE_TIMERREGS 6
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 3647b28e8de0..47384f7323ac 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1896,7 +1896,7 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct __maybe_unused of_device_id cs89x0_match[] = {
+static const struct of_device_id __maybe_unused cs89x0_match[] = {
{ .compatible = "cirrus,cs8900", },
{ .compatible = "cirrus,cs8920", },
{ },
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 127ce9707378..91b8f6f5a765 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -312,8 +312,6 @@ struct de_private {
u32 msg_enable;
- struct net_device_stats net_stats;
-
struct pci_dev *pdev;
u16 setup_frame[DE_SETUP_FRAME_WORDS];
@@ -388,14 +386,14 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
netif_warn(de, rx_err, de->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
- de->net_stats.rx_length_errors++;
+ de->dev->stats.rx_length_errors++;
}
} else if (status & RxError) {
/* There was a fatal error. */
- de->net_stats.rx_errors++; /* end of a packet.*/
- if (status & 0x0890) de->net_stats.rx_length_errors++;
- if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
- if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
+ de->dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) de->dev->stats.rx_length_errors++;
+ if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
+ if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
}
}
@@ -423,7 +421,7 @@ static void de_rx (struct de_private *de)
mapping = de->rx_skb[rx_tail].mapping;
if (unlikely(drop)) {
- de->net_stats.rx_dropped++;
+ de->dev->stats.rx_dropped++;
goto rx_next;
}
@@ -441,7 +439,7 @@ static void de_rx (struct de_private *de)
buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
copy_skb = netdev_alloc_skb(de->dev, buflen);
if (unlikely(!copy_skb)) {
- de->net_stats.rx_dropped++;
+ de->dev->stats.rx_dropped++;
drop = 1;
rx_work = 100;
goto rx_next;
@@ -470,8 +468,8 @@ static void de_rx (struct de_private *de)
skb->protocol = eth_type_trans (skb, de->dev);
- de->net_stats.rx_packets++;
- de->net_stats.rx_bytes += skb->len;
+ de->dev->stats.rx_packets++;
+ de->dev->stats.rx_bytes += skb->len;
rc = netif_rx (skb);
if (rc == NET_RX_DROP)
drop = 1;
@@ -572,18 +570,18 @@ static void de_tx (struct de_private *de)
netif_dbg(de, tx_err, de->dev,
"tx err, status 0x%x\n",
status);
- de->net_stats.tx_errors++;
+ de->dev->stats.tx_errors++;
if (status & TxOWC)
- de->net_stats.tx_window_errors++;
+ de->dev->stats.tx_window_errors++;
if (status & TxMaxCol)
- de->net_stats.tx_aborted_errors++;
+ de->dev->stats.tx_aborted_errors++;
if (status & TxLinkFail)
- de->net_stats.tx_carrier_errors++;
+ de->dev->stats.tx_carrier_errors++;
if (status & TxFIFOUnder)
- de->net_stats.tx_fifo_errors++;
+ de->dev->stats.tx_fifo_errors++;
} else {
- de->net_stats.tx_packets++;
- de->net_stats.tx_bytes += skb->len;
+ de->dev->stats.tx_packets++;
+ de->dev->stats.tx_bytes += skb->len;
netif_dbg(de, tx_done, de->dev,
"tx done, slot %d\n", tx_tail);
}
@@ -814,9 +812,9 @@ static void de_set_rx_mode (struct net_device *dev)
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
if (unlikely(rx_missed & RxMissedOver))
- de->net_stats.rx_missed_errors += RxMissedMask;
+ de->dev->stats.rx_missed_errors += RxMissedMask;
else
- de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
+ de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
}
static void __de_get_stats(struct de_private *de)
@@ -836,7 +834,7 @@ static struct net_device_stats *de_get_stats(struct net_device *dev)
__de_get_stats(de);
spin_unlock_irq(&de->lock);
- return &de->net_stats;
+ return &dev->stats;
}
static inline int de_is_running (struct de_private *de)
@@ -1348,7 +1346,7 @@ static void de_clean_rings (struct de_private *de)
struct sk_buff *skb = de->tx_skb[i].skb;
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
- de->net_stats.tx_dropped++;
+ de->dev->stats.tx_dropped++;
pci_unmap_single(de->pdev,
de->tx_skb[i].mapping,
skb->len, PCI_DMA_TODEVICE);
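The de2104x conversion above follows the same pattern as the chelsio and dl2k changes in this series: the driver-private struct net_device_stats copy goes away and counters land directly in the stats instance embedded in every net_device. A minimal sketch of the resulting idiom (illustrative only, hypothetical helper):

static void example_count_rx(struct net_device *dev, unsigned int bytes)
{
	/* No private mirror to keep in sync; the core already owns
	 * a struct net_device_stats inside struct net_device.
	 */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += bytes;
}

ndo_get_stats implementations can then simply return &dev->stats, as the updated de_get_stats() does.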
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1e350135f11d..778f974e2928 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -878,10 +878,10 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
/* Transmit Underrun */
if (tx_status & 0x10) {
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
dw16(ASICCtrl + 2,
@@ -903,7 +903,7 @@ tx_error (struct net_device *dev, int tx_status)
}
/* Late Collision */
if (tx_status & 0x04) {
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
/* TxReset and clear FIFO */
dw16(ASICCtrl + 2, TxReset | FIFOReset);
/* Wait reset done */
@@ -916,13 +916,8 @@ tx_error (struct net_device *dev, int tx_status)
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
-#ifdef ETHER_STATS
if (tx_status & 0x08)
- np->stats.collisions16++;
-#else
- if (tx_status & 0x08)
- np->stats.collisions++;
-#endif
+ dev->stats.collisions++;
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -952,15 +947,15 @@ receive_packet (struct net_device *dev)
break;
/* Update rx error statistics, drop packet. */
if (frame_status & RFS_Errors) {
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (frame_status & (RxRuntFrame | RxLengthError))
- np->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (frame_status & RxFCSError)
- np->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
- np->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
- np->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
@@ -1096,23 +1091,23 @@ get_stats (struct net_device *dev)
/* All statistics registers need to be acknowledged,
else statistics overflow could cause problems */
- np->stats.rx_packets += dr32(FramesRcvOk);
- np->stats.tx_packets += dr32(FramesXmtOk);
- np->stats.rx_bytes += dr32(OctetRcvOk);
- np->stats.tx_bytes += dr32(OctetXmtOk);
+ dev->stats.rx_packets += dr32(FramesRcvOk);
+ dev->stats.tx_packets += dr32(FramesXmtOk);
+ dev->stats.rx_bytes += dr32(OctetRcvOk);
+ dev->stats.tx_bytes += dr32(OctetXmtOk);
- np->stats.multicast = dr32(McstFramesRcvdOk);
- np->stats.collisions += dr32(SingleColFrames)
+ dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
/* detailed tx errors */
stat_reg = dr16(FramesAbortXSColls);
- np->stats.tx_aborted_errors += stat_reg;
- np->stats.tx_errors += stat_reg;
+ dev->stats.tx_aborted_errors += stat_reg;
+ dev->stats.tx_errors += stat_reg;
stat_reg = dr16(CarrierSenseErrors);
- np->stats.tx_carrier_errors += stat_reg;
- np->stats.tx_errors += stat_reg;
+ dev->stats.tx_carrier_errors += stat_reg;
+ dev->stats.tx_errors += stat_reg;
/* Clear all other statistic register. */
dr32(McstOctetXmtOk);
@@ -1142,7 +1137,7 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
- return &np->stats;
+ return &dev->stats;
}
static int
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 5d8ae5320242..10e98ba33ebf 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -377,7 +377,6 @@ struct netdev_private {
void __iomem *eeprom_addr;
spinlock_t tx_lock;
spinlock_t rx_lock;
- struct net_device_stats stats;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
unsigned int speed; /* Operating speed */
unsigned int vlan; /* VLAN Id */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d49528ad7821..50566243e6fa 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -567,6 +567,12 @@ struct be_error_recovery {
/* Ethtool priv_flags */
#define BE_DISABLE_TPE_RECOVERY 0x1
+struct be_vxlan_port {
+ struct list_head list;
+ __be16 port; /* VxLAN UDP dst port */
+ int port_aliases; /* alias count */
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -671,9 +677,9 @@ struct be_adapter {
u32 sli_family;
u8 hba_port_num;
u16 pvid;
- __be16 vxlan_port;
- int vxlan_port_count;
- int vxlan_port_aliases;
+ __be16 vxlan_port; /* offloaded vxlan port num */
+ int vxlan_port_count; /* active vxlan port count */
+ struct list_head vxlan_port_list; /* vxlan port list */
struct phy_info phy;
u8 wol_cap;
bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6be3b9aba8ed..f3a09ab55900 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3857,6 +3857,44 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
+static int be_enable_vxlan_offloads(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->pdev->dev;
+ struct be_vxlan_port *vxlan_port;
+ __be16 port;
+ int status;
+
+ vxlan_port = list_first_entry(&adapter->vxlan_port_list,
+ struct be_vxlan_port, list);
+ port = vxlan_port->port;
+
+ status = be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_NORMAL_TO_TUNNEL);
+ if (status) {
+ dev_warn(dev, "Failed to convert normal interface to tunnel\n");
+ return status;
+ }
+ adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
+
+ status = be_cmd_set_vxlan_port(adapter, port);
+ if (status) {
+ dev_warn(dev, "Failed to add VxLAN port\n");
+ return status;
+ }
+ adapter->vxlan_port = port;
+
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
+ return 0;
+}
+
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -4903,63 +4941,59 @@ static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
* those other tunnels are unexported on the fly through ndo_features_check().
*
* Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
- * adds more than one port, disable offloads and don't re-enable them again
- * until after all the tunnels are removed.
+ * adds more than one port, disable offloads and re-enable them when
+ * there's only one port left. We maintain a list of ports for this purpose.
*/
static void be_work_add_vxlan_port(struct work_struct *work)
{
struct be_cmd_work *cmd_work =
container_of(work, struct be_cmd_work, work);
struct be_adapter *adapter = cmd_work->adapter;
- struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
__be16 port = cmd_work->info.vxlan_port;
+ struct be_vxlan_port *vxlan_port;
int status;
- if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
- adapter->vxlan_port_aliases++;
- goto done;
+ /* Bump up the alias count if it is an existing port */
+ list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
+ if (vxlan_port->port == port) {
+ vxlan_port->port_aliases++;
+ goto done;
+ }
}
+ /* Add a new port to our list. We don't need a lock here since port
+ * add/delete are done only in the context of a single-threaded work
+ * queue (be_wq).
+ */
+ vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
+ if (!vxlan_port)
+ goto done;
+
+ vxlan_port->port = port;
+ INIT_LIST_HEAD(&vxlan_port->list);
+ list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
+ adapter->vxlan_port_count++;
+
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
dev_info(dev,
"Only one UDP port supported for VxLAN offloads\n");
dev_info(dev, "Disabling VxLAN offloads\n");
- adapter->vxlan_port_count++;
goto err;
}
- if (adapter->vxlan_port_count++ >= 1)
+ if (adapter->vxlan_port_count > 1)
goto done;
- status = be_cmd_manage_iface(adapter, adapter->if_handle,
- OP_CONVERT_NORMAL_TO_TUNNEL);
- if (status) {
- dev_warn(dev, "Failed to convert normal interface to tunnel\n");
- goto err;
- }
-
- status = be_cmd_set_vxlan_port(adapter, port);
- if (status) {
- dev_warn(dev, "Failed to add VxLAN port\n");
- goto err;
- }
- adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
- adapter->vxlan_port = port;
-
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ status = be_enable_vxlan_offloads(adapter);
+ if (!status)
+ goto done;
- dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
- be16_to_cpu(port));
- goto done;
err:
be_disable_vxlan_offloads(adapter);
done:
kfree(cmd_work);
+ return;
}
static void be_work_del_vxlan_port(struct work_struct *work)
@@ -4968,23 +5002,40 @@ static void be_work_del_vxlan_port(struct work_struct *work)
container_of(work, struct be_cmd_work, work);
struct be_adapter *adapter = cmd_work->adapter;
__be16 port = cmd_work->info.vxlan_port;
+ struct be_vxlan_port *vxlan_port;
- if (adapter->vxlan_port != port)
- goto done;
+ /* Nothing to be done if a port alias is being deleted */
+ list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
+ if (vxlan_port->port == port) {
+ if (vxlan_port->port_aliases) {
+ vxlan_port->port_aliases--;
+ goto done;
+ }
+ break;
+ }
+ }
+
+ /* No port aliases left; delete the port from the list */
+ list_del(&vxlan_port->list);
+ adapter->vxlan_port_count--;
- if (adapter->vxlan_port_aliases) {
- adapter->vxlan_port_aliases--;
+ /* Disable VxLAN offload if this is the offloaded port */
+ if (adapter->vxlan_port == vxlan_port->port) {
+ WARN_ON(adapter->vxlan_port_count);
+ be_disable_vxlan_offloads(adapter);
+ dev_info(&adapter->pdev->dev,
+ "Disabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
goto out;
}
- be_disable_vxlan_offloads(adapter);
+ /* If only 1 port is left, re-enable VxLAN offload */
+ if (adapter->vxlan_port_count == 1)
+ be_enable_vxlan_offloads(adapter);
- dev_info(&adapter->pdev->dev,
- "Disabled VxLAN offloads for UDP port %d\n",
- be16_to_cpu(port));
-done:
- adapter->vxlan_port_count--;
out:
+ kfree(vxlan_port);
+done:
kfree(cmd_work);
}
@@ -5217,15 +5268,15 @@ static bool be_err_is_recoverable(struct be_adapter *adapter)
dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
ue_err_code);
- if (jiffies - err_rec->probe_time <= initial_idle_time) {
+ if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
dev_err(&adapter->pdev->dev,
"Cannot recover within %lu sec from driver load\n",
jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
return false;
}
- if (err_rec->last_recovery_time &&
- (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
+ if (err_rec->last_recovery_time && time_before_eq(
+ jiffies - err_rec->last_recovery_time, recovery_interval)) {
dev_err(&adapter->pdev->dev,
"Cannot recover within %lu sec from last recovery\n",
jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
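The switch to time_before_eq() above guards against jiffies wraparound: the time_*() helpers compare via a signed difference instead of a raw unsigned comparison. A minimal sketch of the idiom (illustrative, not driver code):

	unsigned long deadline = jiffies + 5 * HZ;	/* five seconds out */

	/* Correct even if the jiffies counter wraps in between */
	if (time_before_eq(jiffies, deadline))
		pr_debug("still inside the window\n");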
@@ -5626,6 +5677,7 @@ static int be_drv_init(struct be_adapter *adapter)
/* Must be a power of 2 or else MODULO will BUG_ON */
adapter->be_get_temp_freq = 64;
+ INIT_LIST_HEAD(&adapter->vxlan_port_list);
return 0;
free_rx_filter:
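For the list bookkeeping introduced above, both the add and delete paths walk adapter->vxlan_port_list only from the single-threaded be_wq, so no extra locking is needed. The lookup they share could be factored as below (a hypothetical helper shown only to illustrate the walk; it is not part of the patch):

static struct be_vxlan_port *be_find_vxlan_port(struct be_adapter *adapter,
						__be16 port)
{
	struct be_vxlan_port *vxlan_port;

	/* Safe without a lock: only be_wq work items touch this list */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list)
		if (vxlan_port->port == port)
			return vxlan_port;
	return NULL;
}

Adding an already-known port then just bumps port_aliases on the entry found, and a delete drains the alias count before the node is actually unlinked and freed.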
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 23d82748f52b..e863ba74d005 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1148,14 +1148,14 @@ static int ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
- memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
+ ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
const void *mac;
mac = of_get_mac_address(pdev->dev.of_node);
if (mac)
- memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+ ether_addr_copy(netdev->dev_addr, mac);
priv->phy_id = -1;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f819843e2bae..659f1ad37e96 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ade6b3e4ed13..95bf5e89cfd1 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -32,6 +32,9 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/of_net.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -40,103 +43,137 @@
#define DRV_NAME "ftgmac100"
#define DRV_VERSION "0.7"
-#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */
-#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */
+/* Arbitrary values, I am not sure the HW has limits */
+#define MAX_RX_QUEUE_ENTRIES 1024
+#define MAX_TX_QUEUE_ENTRIES 1024
+#define MIN_RX_QUEUE_ENTRIES 32
+#define MIN_TX_QUEUE_ENTRIES 32
-#define MAX_PKT_SIZE 1518
-#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */
+/* Defaults */
+#define DEF_RX_QUEUE_ENTRIES 128
+#define DEF_TX_QUEUE_ENTRIES 128
-/******************************************************************************
- * private data
- *****************************************************************************/
-struct ftgmac100_descs {
- struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
- struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
-};
+#define MAX_PKT_SIZE 1536
+#define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */
+
+/* Min number of tx ring entries before stopping queue */
+#define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
struct ftgmac100 {
+ /* Registers */
struct resource *res;
void __iomem *base;
- int irq;
-
- struct ftgmac100_descs *descs;
- dma_addr_t descs_dma_addr;
-
- struct page *rx_pages[RX_QUEUE_ENTRIES];
+ /* Rx ring */
+ unsigned int rx_q_entries;
+ struct ftgmac100_rxdes *rxdes;
+ dma_addr_t rxdes_dma;
+ struct sk_buff **rx_skbs;
unsigned int rx_pointer;
+ u32 rxdes0_edorr_mask;
+
+ /* Tx ring */
+ unsigned int tx_q_entries;
+ struct ftgmac100_txdes *txdes;
+ dma_addr_t txdes_dma;
+ struct sk_buff **tx_skbs;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
- unsigned int tx_pending;
+ u32 txdes0_edotr_mask;
+
+ /* Used to signal the reset task of a ring change request */
+ unsigned int new_rx_q_entries;
+ unsigned int new_tx_q_entries;
- spinlock_t tx_lock;
+ /* Scratch page to use when rx skb alloc fails */
+ void *rx_scratch;
+ dma_addr_t rx_scratch_dma;
+ /* Component structures */
struct net_device *netdev;
struct device *dev;
struct ncsi_dev *ndev;
struct napi_struct napi;
-
+ struct work_struct reset_task;
struct mii_bus *mii_bus;
- int old_speed;
- int int_mask_all;
- bool use_ncsi;
- bool enabled;
-
- u32 rxdes0_edorr_mask;
- u32 txdes0_edotr_mask;
-};
-static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes, gfp_t gfp);
-
-/******************************************************************************
- * internal functions (hardware register access)
- *****************************************************************************/
-static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
-{
- iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
-}
+ /* Link management */
+ int cur_speed;
+ int cur_duplex;
+ bool use_ncsi;
-static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
- unsigned int size)
-{
- size = FTGMAC100_RBSR_SIZE(size);
- iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
-}
+ /* Multicast filter settings */
+ u32 maht0;
+ u32 maht1;
-static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
- dma_addr_t addr)
-{
- iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-}
+ /* Flow control settings */
+ bool tx_pause;
+ bool rx_pause;
+ bool aneg_pause;
-static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
-{
- iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
-}
+ /* Misc */
+ bool need_mac_restart;
+ bool is_aspeed;
+};
-static int ftgmac100_reset_hw(struct ftgmac100 *priv)
+static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
{
struct net_device *netdev = priv->netdev;
int i;
/* NOTE: reset clears all registers */
- iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
- for (i = 0; i < 5; i++) {
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+ iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
+ priv->base + FTGMAC100_OFFSET_MACCR);
+ for (i = 0; i < 50; i++) {
unsigned int maccr;
maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
if (!(maccr & FTGMAC100_MACCR_SW_RST))
return 0;
- udelay(1000);
+ udelay(1);
}
- netdev_err(netdev, "software reset failed\n");
+ netdev_err(netdev, "Hardware reset failed\n");
return -EIO;
}
-static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
+static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
+{
+ u32 maccr = 0;
+
+ switch (priv->cur_speed) {
+ case SPEED_10:
+ case 0: /* no link */
+ break;
+
+ case SPEED_100:
+ maccr |= FTGMAC100_MACCR_FAST_MODE;
+ break;
+
+ case SPEED_1000:
+ maccr |= FTGMAC100_MACCR_GIGA_MODE;
+ break;
+ default:
+ netdev_err(priv->netdev, "Unknown speed %d!\n",
+ priv->cur_speed);
+ break;
+ }
+
+ /* (Re)initialize the queue pointers */
+ priv->rx_pointer = 0;
+ priv->tx_clean_pointer = 0;
+ priv->tx_pointer = 0;
+
+ /* The doc says reset twice with 10us interval */
+ if (ftgmac100_reset_mac(priv, maccr))
+ return -EIO;
+ usleep_range(10, 1000);
+ return ftgmac100_reset_mac(priv, maccr);
+}
+
+static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
{
unsigned int maddr = mac[0] << 8 | mac[1];
unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
@@ -145,7 +182,7 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
-static void ftgmac100_setup_mac(struct ftgmac100 *priv)
+static void ftgmac100_initial_mac(struct ftgmac100 *priv)
{
u8 mac[ETH_ALEN];
unsigned int m;
@@ -189,716 +226,833 @@ static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
return ret;
eth_commit_mac_addr_change(dev, p);
- ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr);
+ ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
return 0;
}
-static void ftgmac100_init_hw(struct ftgmac100 *priv)
-{
- /* setup ring buffer base registers */
- ftgmac100_set_rx_ring_base(priv,
- priv->descs_dma_addr +
- offsetof(struct ftgmac100_descs, rxdes));
- ftgmac100_set_normal_prio_tx_ring_base(priv,
- priv->descs_dma_addr +
- offsetof(struct ftgmac100_descs, txdes));
-
- ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
-
- iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
-
- ftgmac100_set_mac(priv, priv->netdev->dev_addr);
-}
-
-#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \
- FTGMAC100_MACCR_RXDMA_EN | \
- FTGMAC100_MACCR_TXMAC_EN | \
- FTGMAC100_MACCR_RXMAC_EN | \
- FTGMAC100_MACCR_FULLDUP | \
- FTGMAC100_MACCR_CRC_APD | \
- FTGMAC100_MACCR_RX_RUNT | \
- FTGMAC100_MACCR_RX_BROADPKT)
-
-static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
+static void ftgmac100_config_pause(struct ftgmac100 *priv)
{
- int maccr = MACCR_ENABLE_ALL;
+ u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
- switch (speed) {
- default:
- case 10:
- break;
-
- case 100:
- maccr |= FTGMAC100_MACCR_FAST_MODE;
- break;
+ /* Throttle tx queue when receiving pause frames */
+ if (priv->rx_pause)
+ fcr |= FTGMAC100_FCR_FC_EN;
- case 1000:
- maccr |= FTGMAC100_MACCR_GIGA_MODE;
- break;
- }
+ /* Enables sending pause frames when the RX queue is past a
+ * certain threshold.
+ */
+ if (priv->tx_pause)
+ fcr |= FTGMAC100_FCR_FCTHR_EN;
- iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+ iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
}
-static void ftgmac100_stop_hw(struct ftgmac100 *priv)
+static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
- iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
-}
+ u32 reg, rfifo_sz, tfifo_sz;
-/******************************************************************************
- * internal functions (receive descriptor)
- *****************************************************************************/
-static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
-}
+ /* Clear stale interrupts */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
-static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
-}
+ /* Setup RX ring buffer base */
+ iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
-static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
-}
+ /* Setup TX ring buffer base */
+ iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- /* clear status bits */
- rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
-}
+ /* Configure RX buffer size */
+ iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
+ priv->base + FTGMAC100_OFFSET_RBSR);
-static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
-}
+ /* Set RX descriptor autopoll */
+ iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
+ priv->base + FTGMAC100_OFFSET_APTC);
-static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
-}
+ /* Write MAC address */
+ ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
-static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
-}
+ /* Write multicast filter */
+ iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
+ iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
-static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
+ /* Configure descriptor sizes and increase burst sizes according
+ * to values in Aspeed SDK. The FIFO arbitration is enabled and
+ * the thresholds set based on the recommended values in the
+ * AST2400 specification.
+ */
+ iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */
+ FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */
+ FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
+ FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
+ FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */
+ FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */
+ FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */
+ priv->base + FTGMAC100_OFFSET_DBLAC);
+
+ /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
+ * mitigation doesn't seem to provide any benefit with NAPI so leave
+ * it at that.
+ */
+ iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
+ FTGMAC100_ITC_TXINT_THR(1),
+ priv->base + FTGMAC100_OFFSET_ITC);
+
+ /* Configure FIFO sizes in the TPAFCR register */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
+ rfifo_sz = reg & 0x00000007;
+ tfifo_sz = (reg >> 3) & 0x00000007;
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
+ reg &= ~0x3f000000;
+ reg |= (tfifo_sz << 27);
+ reg |= (rfifo_sz << 24);
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
+}
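The FIFO-size plumbing at the end of ftgmac100_init_hw() is pure bit surgery: the sizes are read from FEAR[2:0] (RX) and FEAR[5:3] (TX) and re-encoded into TPAFCR[26:24] and TPAFCR[29:27]. Isolated as a sketch (hypothetical helper name, same logic as the code above):

static u32 example_tpafcr_fifo_bits(u32 fear, u32 tpafcr)
{
	u32 rfifo_sz = fear & 0x7;		/* FEAR[2:0]: RX FIFO size */
	u32 tfifo_sz = (fear >> 3) & 0x7;	/* FEAR[5:3]: TX FIFO size */

	tpafcr &= ~0x3f000000;			/* clear TPAFCR[29:24] */
	return tpafcr | (tfifo_sz << 27) | (rfifo_sz << 24);
}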
+
+static void ftgmac100_start_hw(struct ftgmac100 *priv)
+{
+ u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+
+ /* Keep the original GMAC and FAST bits */
+ maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
+
+ /* Add all the main enable bits */
+ maccr |= FTGMAC100_MACCR_TXDMA_EN |
+ FTGMAC100_MACCR_RXDMA_EN |
+ FTGMAC100_MACCR_TXMAC_EN |
+ FTGMAC100_MACCR_RXMAC_EN |
+ FTGMAC100_MACCR_CRC_APD |
+ FTGMAC100_MACCR_PHY_LINK_LEVEL |
+ FTGMAC100_MACCR_RX_RUNT |
+ FTGMAC100_MACCR_RX_BROADPKT;
+
+ /* Add other bits as needed */
+ if (priv->cur_duplex == DUPLEX_FULL)
+ maccr |= FTGMAC100_MACCR_FULLDUP;
+ if (priv->netdev->flags & IFF_PROMISC)
+ maccr |= FTGMAC100_MACCR_RX_ALL;
+ if (priv->netdev->flags & IFF_ALLMULTI)
+ maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
+ else if (netdev_mc_count(priv->netdev))
+ maccr |= FTGMAC100_MACCR_HT_MULTI_EN;
+
+ /* Vlan filtering enabled */
+ if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ maccr |= FTGMAC100_MACCR_RM_VLAN;
+
+ /* Hit the HW */
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
-static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
-static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
{
- return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
-}
+ struct netdev_hw_addr *ha;
-static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
-}
+ priv->maht1 = 0;
+ priv->maht0 = 0;
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
-static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
+ crc_val = (~(crc_val >> 2)) & 0x3f;
+ if (crc_val >= 32)
+ priv->maht1 |= 1ul << (crc_val - 32);
+ else
+ priv->maht0 |= 1ul << (crc_val);
+ }
}
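The hash placement above reads as: take the little-endian CRC32 of the address, drop the low two bits, invert, and keep six bits; values 0-31 select a bit in MAHT0 and 32-63 a bit in MAHT1. As a standalone sketch (hypothetical helper mirroring the computation):

static unsigned int example_mc_hash_bit(const u8 addr[ETH_ALEN])
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);

	/* 0..31 -> MAHT0 bit, 32..63 -> MAHT1 bit */
	return (~(crc >> 2)) & 0x3f;
}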
-static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
- dma_addr_t addr)
+static void ftgmac100_set_rx_mode(struct net_device *netdev)
{
- rxdes->rxdes3 = cpu_to_le32(addr);
-}
+ struct ftgmac100 *priv = netdev_priv(netdev);
-static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
-{
- return le32_to_cpu(rxdes->rxdes3);
-}
+ /* Setup the hash filter */
+ ftgmac100_calc_mc_hash(priv);
-static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
-{
- return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
- cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
-}
+ /* Interface down? That's all there is to do */
+ if (!netif_running(netdev))
+ return;
-static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
-{
- return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
- cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
-}
+ /* Update the HW */
+ iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
+ iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
-static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
+ /* Reconfigure MACCR */
+ ftgmac100_start_hw(priv);
}
-static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
+static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
+ struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
- return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
-}
+ struct net_device *netdev = priv->netdev;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ int err;
-static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
-}
+ skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_warn(netdev, "failed to allocate rx skb\n");
+ err = -ENOMEM;
+ map = priv->rx_scratch_dma;
+ } else {
+ map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to map rx page\n");
+ dev_kfree_skb_any(skb);
+ map = priv->rx_scratch_dma;
+ skb = NULL;
+ err = -ENOMEM;
+ }
+ }
-static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- return &priv->rx_pages[rxdes - priv->descs->rxdes];
-}
+ /* Store skb */
+ priv->rx_skbs[entry] = skb;
-/*
- * rxdes2 is not used by hardware. We use it to keep track of page.
- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
- */
-static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes,
- struct page *page)
-{
- *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
-}
+ /* Store DMA address into RX desc */
+ rxdes->rxdes3 = cpu_to_le32(map);
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- return *ftgmac100_rxdes_page_slot(priv, rxdes);
-}
+ /* Ensure the above is ordered vs clearing the OWN bit */
+ dma_wmb();
-/******************************************************************************
- * internal functions (receive)
- *****************************************************************************/
-static int ftgmac100_next_rx_pointer(int pointer)
-{
- return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
-}
+ /* Clean status (which resets own bit) */
+ if (entry == (priv->rx_q_entries - 1))
+ rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
+ else
+ rxdes->rxdes0 = 0;
-static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
-{
- priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
-}
-
-static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
-{
- return &priv->descs->rxdes[priv->rx_pointer];
+ return 0;
}
-static struct ftgmac100_rxdes *
-ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
+static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
+ unsigned int pointer)
{
- struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
-
- while (ftgmac100_rxdes_packet_ready(rxdes)) {
- if (ftgmac100_rxdes_first_segment(rxdes))
- return rxdes;
-
- ftgmac100_rxdes_set_dma_own(priv, rxdes);
- ftgmac100_rx_pointer_advance(priv);
- rxdes = ftgmac100_current_rxdes(priv);
- }
-
- return NULL;
+ return (pointer + 1) & (priv->rx_q_entries - 1);
}
-static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
struct net_device *netdev = priv->netdev;
- bool error = false;
-
- if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx err\n");
+ if (status & FTGMAC100_RXDES0_RX_ERR)
netdev->stats.rx_errors++;
- error = true;
- }
-
- if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx crc err\n");
+ if (status & FTGMAC100_RXDES0_CRC_ERR)
netdev->stats.rx_crc_errors++;
- error = true;
- } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx IP checksum err\n");
-
- error = true;
- }
-
- if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx frame too long\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx runt\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx odd nibble\n");
+ if (status & (FTGMAC100_RXDES0_FTL |
+ FTGMAC100_RXDES0_RUNT |
+ FTGMAC100_RXDES0_RX_ODD_NB))
netdev->stats.rx_length_errors++;
- error = true;
- }
-
- return error;
}
-static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
+static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
{
struct net_device *netdev = priv->netdev;
- struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
- bool done = false;
+ struct ftgmac100_rxdes *rxdes;
+ struct sk_buff *skb;
+ unsigned int pointer, size;
+ u32 status, csum_vlan;
+ dma_addr_t map;
- if (net_ratelimit())
- netdev_dbg(netdev, "drop packet %p\n", rxdes);
+ /* Grab next RX descriptor */
+ pointer = priv->rx_pointer;
+ rxdes = &priv->rxdes[pointer];
- do {
- if (ftgmac100_rxdes_last_segment(rxdes))
- done = true;
+ /* Grab descriptor status */
+ status = le32_to_cpu(rxdes->rxdes0);
- ftgmac100_rxdes_set_dma_own(priv, rxdes);
- ftgmac100_rx_pointer_advance(priv);
- rxdes = ftgmac100_current_rxdes(priv);
- } while (!done && ftgmac100_rxdes_packet_ready(rxdes));
+ /* Do we have a packet? */
+ if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
+ return false;
- netdev->stats.rx_dropped++;
-}
+ /* Order subsequent reads with the test for the ready bit */
+ dma_rmb();
-static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
-{
- struct net_device *netdev = priv->netdev;
- struct ftgmac100_rxdes *rxdes;
- struct sk_buff *skb;
- bool done = false;
+ /* We don't cope with fragmented RX packets */
+ if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
+ !(status & FTGMAC100_RXDES0_LRS)))
+ goto drop;
- rxdes = ftgmac100_rx_locate_first_segment(priv);
- if (!rxdes)
- return false;
+ /* Grab received size and csum vlan field in the descriptor */
+ size = status & FTGMAC100_RXDES0_VDBC;
+ csum_vlan = le32_to_cpu(rxdes->rxdes1);
- if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
- ftgmac100_rx_drop_packet(priv);
- return true;
+ /* Any error (other than csum offload) flagged? */
+ if (unlikely(status & RXDES0_ANY_ERROR)) {
+ /* Correct for incorrect flagging of runt packets
+ * with vlan tags... Just accept a runt packet that
+ * has been flagged as vlan and whose size is at
+ * least 60 bytes.
+ */
+ if ((status & FTGMAC100_RXDES0_RUNT) &&
+ (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
+ (size >= 60))
+ status &= ~FTGMAC100_RXDES0_RUNT;
+
+ /* Any error still in there? */
+ if (status & RXDES0_ANY_ERROR) {
+ ftgmac100_rx_packet_error(priv, status);
+ goto drop;
+ }
}
- /* start processing */
- skb = netdev_alloc_skb_ip_align(netdev, 128);
- if (unlikely(!skb)) {
- if (net_ratelimit())
- netdev_err(netdev, "rx skb alloc failed\n");
-
- ftgmac100_rx_drop_packet(priv);
- return true;
+ /* If the packet had no skb (failed to allocate earlier)
+ * then try to allocate one and skip
+ */
+ skb = priv->rx_skbs[pointer];
+ if (unlikely(!skb)) {
+ ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+ goto drop;
}
- if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+ if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
netdev->stats.multicast++;
- /*
- * It seems that HW does checksum incorrectly with fragmented packets,
- * so we are conservative here - if HW checksum error, let software do
- * the checksum again.
+ /* If the HW found checksum errors, bounce it to software.
+ *
+ * If we didn't, we need to see if the packet was recognized
+ * by HW as one of the supported checksummed protocols before
+ * we accept the HW test results.
*/
- if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
- (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- do {
- dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
- struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
- unsigned int size;
+ if (netdev->features & NETIF_F_RXCSUM) {
+ u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
+ FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
+ FTGMAC100_RXDES1_IP_CHKSUM_ERR;
+ if ((csum_vlan & err_bits) ||
+ !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
- dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ /* Transfer received size to skb */
+ skb_put(skb, size);
- size = ftgmac100_rxdes_data_length(rxdes);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
+ /* Extract vlan tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ csum_vlan & 0xffff);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += PAGE_SIZE;
+ /* Tear down DMA mapping, do necessary cache management */
+ map = le32_to_cpu(rxdes->rxdes3);
- if (ftgmac100_rxdes_last_segment(rxdes))
- done = true;
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
+ /* When we don't have an iommu, we can save cycles by not
+ * invalidating the cache for the part of the packet that
+ * wasn't received.
+ */
+ dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
+#else
+ dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+#endif
- ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
- ftgmac100_rx_pointer_advance(priv);
- rxdes = ftgmac100_current_rxdes(priv);
- } while (!done);
+ /* Replenish rx ring */
+ ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
- /* Small frames are copied into linear part of skb to free one page */
- if (skb->len <= 128) {
- skb->truesize -= PAGE_SIZE;
- __pskb_pull_tail(skb, skb->len);
- } else {
- /* We pull the minimum amount into linear part */
- __pskb_pull_tail(skb, ETH_HLEN);
- }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
+ netdev->stats.rx_bytes += size;
/* push packet to protocol stack */
- napi_gro_receive(&priv->napi, skb);
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
(*processed)++;
return true;
+
+ drop:
+ /* Clean rxdes0 (which resets own bit) */
+ rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+ netdev->stats.rx_dropped++;
+ return true;
}
-/******************************************************************************
- * internal functions (transmit descriptor)
- *****************************************************************************/
-static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
- struct ftgmac100_txdes *txdes)
+static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
+ unsigned int index)
{
- /* clear all except end of ring bit */
- txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
- txdes->txdes1 = 0;
- txdes->txdes2 = 0;
- txdes->txdes3 = 0;
+ if (index == (priv->tx_q_entries - 1))
+ return priv->txdes0_edotr_mask;
+ else
+ return 0;
}
-static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
+static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
+ unsigned int pointer)
{
- return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+ return (pointer + 1) & (priv->tx_q_entries - 1);
}
-static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
+static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
- /*
- * Make sure dma own bit will not be set before any other
- * descriptor fields.
+ /* Returns the number of available slots in the TX queue
+ *
+ * This always leaves one free slot so we don't have to
+ * worry about empty vs. full, and this simplifies the
+ * test for ftgmac100_tx_buf_cleanable() below
*/
- wmb();
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+ return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
+ (priv->tx_q_entries - 1);
}
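The ring arithmetic above is the classic one-slot-reserved scheme. A worked example, assuming a 128-entry ring (mask 0x7f):

	/* clean == head:      (10 - 10 - 1) & 0x7f = 127 free -> ring empty
	 * head trails by one: (10 -  9 - 1) & 0x7f =   0 free -> ring "full"
	 */

Sacrificing one slot is what lets head == clean unambiguously mean empty, without carrying a separate occupancy count.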
-static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
- struct ftgmac100_txdes *txdes)
+static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
{
- txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
+ return priv->tx_pointer != priv->tx_clean_pointer;
}
-static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
+static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
+ unsigned int pointer,
+ struct sk_buff *skb,
+ struct ftgmac100_txdes *txdes,
+ u32 ctl_stat)
{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
-}
+ dma_addr_t map = le32_to_cpu(txdes->txdes3);
+ size_t len;
-static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
-}
+ if (ctl_stat & FTGMAC100_TXDES0_FTS) {
+ len = skb_headlen(skb);
+ dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
+ } else {
+ len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
+ dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
+ }
-static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
- unsigned int len)
-{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
+ /* Free SKB on last segment */
+ if (ctl_stat & FTGMAC100_TXDES0_LTS)
+ dev_kfree_skb(skb);
+ priv->tx_skbs[pointer] = NULL;
}
-static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
+static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
-}
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_txdes *txdes;
+ struct sk_buff *skb;
+ unsigned int pointer;
+ u32 ctl_stat;
-static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
-}
+ pointer = priv->tx_clean_pointer;
+ txdes = &priv->txdes[pointer];
-static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
-}
+ ctl_stat = le32_to_cpu(txdes->txdes0);
+ if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
+ return false;
-static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
-}
+ skb = priv->tx_skbs[pointer];
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
-static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
- dma_addr_t addr)
-{
- txdes->txdes3 = cpu_to_le32(addr);
-}
+ priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
-static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
-{
- return le32_to_cpu(txdes->txdes3);
+ return true;
}
-/*
- * txdes2 is not used by hardware. We use it to keep track of socket buffer.
- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
- */
-static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
- struct sk_buff *skb)
+static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
- txdes->txdes2 = (unsigned int)skb;
-}
+ struct net_device *netdev = priv->netdev;
-static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
-{
- return (struct sk_buff *)txdes->txdes2;
-}
+ /* Process all completed packets */
+ while (ftgmac100_tx_buf_cleanable(priv) &&
+ ftgmac100_tx_complete_packet(priv))
+ ;
-/******************************************************************************
- * internal functions (transmit)
- *****************************************************************************/
-static int ftgmac100_next_tx_pointer(int pointer)
-{
- return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+ /* Restart queue if needed */
+ smp_mb();
+ if (unlikely(netif_queue_stopped(netdev) &&
+ ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(netdev, 0);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_queue_stopped(netdev) &&
+ ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
+ netif_wake_queue(netdev);
+ __netif_tx_unlock(txq);
+ }
}
-static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
+static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
{
- priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
-}
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ u8 ip_proto = ip_hdr(skb)->protocol;
-static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
-{
- priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
+ *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
+ return true;
+ case IPPROTO_UDP:
+ *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
+ return true;
+ case IPPROTO_IP:
+ return true;
+ }
+ }
+ return skb_checksum_help(skb) == 0;
}
-static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
+static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
- return &priv->descs->txdes[priv->tx_pointer];
-}
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct ftgmac100_txdes *txdes, *first;
+ unsigned int pointer, nfrags, len, i, j;
+ u32 f_ctl_stat, ctl_stat, csum_vlan;
+ dma_addr_t map;
-static struct ftgmac100_txdes *
-ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
-{
- return &priv->descs->txdes[priv->tx_clean_pointer];
-}
+ /* The HW doesn't pad small frames */
+ if (eth_skb_pad(skb)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
-static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
-{
- struct net_device *netdev = priv->netdev;
- struct ftgmac100_txdes *txdes;
- struct sk_buff *skb;
- dma_addr_t map;
+ /* Reject oversize packets */
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev, "tx packet too big\n");
+ goto drop;
+ }
- if (priv->tx_pending == 0)
- return false;
+ /* Do we have a limit on #fragments? I have yet to get a reply
+ * from Aspeed. If there is one, I haven't hit it.
+ */
+ nfrags = skb_shinfo(skb)->nr_frags;
- txdes = ftgmac100_current_clean_txdes(priv);
+ /* Get header len */
+ len = skb_headlen(skb);
- if (ftgmac100_txdes_owned_by_dma(txdes))
- return false;
+ /* Map the packet head */
+ map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, map)) {
+ if (net_ratelimit())
+ netdev_err(netdev, "map tx packet head failed\n");
+ goto drop;
+ }
- skb = ftgmac100_txdes_get_skb(txdes);
- map = ftgmac100_txdes_get_dma_addr(txdes);
+ /* Grab the next free tx descriptor */
+ pointer = priv->tx_pointer;
+ txdes = first = &priv->txdes[pointer];
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
+ /* Set it up with the packet head. Don't write the first
+ * descriptor's control word to the ring just yet
+ */
+ priv->tx_skbs[pointer] = skb;
+ f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
+ f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
+ f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
+ f_ctl_stat |= FTGMAC100_TXDES0_FTS;
+ if (nfrags == 0)
+ f_ctl_stat |= FTGMAC100_TXDES0_LTS;
+ txdes->txdes3 = cpu_to_le32(map);
+
+ /* Setup HW checksumming */
+ csum_vlan = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+ goto drop;
+
+ /* Add VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+ csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+ }
- dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ txdes->txdes1 = cpu_to_le32(csum_vlan);
+
+ /* Next descriptor */
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+
+ /* Add the fragments */
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+
+ /* Map it */
+ map = skb_frag_dma_map(priv->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, map))
+ goto dma_err;
+
+ /* Setup descriptor */
+ priv->tx_skbs[pointer] = skb;
+ txdes = &priv->txdes[pointer];
+ ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
+ ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
+ ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
+ if (i == (nfrags - 1))
+ ctl_stat |= FTGMAC100_TXDES0_LTS;
+ txdes->txdes0 = cpu_to_le32(ctl_stat);
+ txdes->txdes1 = 0;
+ txdes->txdes3 = cpu_to_le32(map);
+
+ /* Next one */
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+ }
- dev_kfree_skb(skb);
+ /* Order the previous packet and descriptor updates
+ * before setting the OWN bit on the first descriptor.
+ */
+ dma_wmb();
+ first->txdes0 = cpu_to_le32(f_ctl_stat);
- ftgmac100_txdes_reset(priv, txdes);
+ /* Update next TX pointer */
+ priv->tx_pointer = pointer;
- ftgmac100_tx_clean_pointer_advance(priv);
+ /* If there isn't enough room for all the fragments of a new packet
+ * in the TX ring, stop the queue. The sequence below is race free
+ * vs. a concurrent restart in ftgmac100_poll()
+ */
+ if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
+ netif_stop_queue(netdev);
+ /* Order the queue stop with the test below */
+ smp_mb();
+ if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
+ netif_wake_queue(netdev);
+ }
- spin_lock(&priv->tx_lock);
- priv->tx_pending--;
- spin_unlock(&priv->tx_lock);
- netif_wake_queue(netdev);
+ /* Poke transmitter to read the updated TX descriptors */
+ iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
- return true;
-}
+ return NETDEV_TX_OK;
-static void ftgmac100_tx_complete(struct ftgmac100 *priv)
-{
- while (ftgmac100_tx_complete_packet(priv))
- ;
+ dma_err:
+ if (net_ratelimit())
+ netdev_err(netdev, "map tx fragment failed\n");
+
+ /* Free head */
+ pointer = priv->tx_pointer;
+ ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
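+ /* Keep only the end-of-ring bit, returning the descriptor to its
+ * free state
+ */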
+ first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
+
+ /* Then all fragments */
+ for (j = 0; j < i; j++) {
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+ txdes = &priv->txdes[pointer];
+ ctl_stat = le32_to_cpu(txdes->txdes0);
+ ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ }
+
+ /* This cannot be reached if we successfully mapped the
+ * last fragment, so we know ftgmac100_free_tx_packet()
+ * hasn't freed the skb yet.
+ */
+ drop:
+ /* Drop the packet */
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
}
-static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
- dma_addr_t map)
+static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
- struct net_device *netdev = priv->netdev;
- struct ftgmac100_txdes *txdes;
- unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
-
- txdes = ftgmac100_current_txdes(priv);
- ftgmac100_tx_pointer_advance(priv);
-
- /* setup TX descriptor */
- ftgmac100_txdes_set_skb(txdes, skb);
- ftgmac100_txdes_set_dma_addr(txdes, map);
- ftgmac100_txdes_set_buffer_size(txdes, len);
-
- ftgmac100_txdes_set_first_segment(txdes);
- ftgmac100_txdes_set_last_segment(txdes);
- ftgmac100_txdes_set_txint(txdes);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol = skb->protocol;
-
- if (protocol == cpu_to_be16(ETH_P_IP)) {
- u8 ip_proto = ip_hdr(skb)->protocol;
-
- ftgmac100_txdes_set_ipcs(txdes);
- if (ip_proto == IPPROTO_TCP)
- ftgmac100_txdes_set_tcpcs(txdes);
- else if (ip_proto == IPPROTO_UDP)
- ftgmac100_txdes_set_udpcs(txdes);
- }
+ int i;
+
+ /* Free all RX buffers */
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
+ struct sk_buff *skb = priv->rx_skbs[i];
+ dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
+
+ if (!skb)
+ continue;
+
+ priv->rx_skbs[i] = NULL;
+ dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
}
- spin_lock(&priv->tx_lock);
- priv->tx_pending++;
- if (priv->tx_pending == TX_QUEUE_ENTRIES)
- netif_stop_queue(netdev);
+ /* Free all TX buffers */
+ for (i = 0; i < priv->tx_q_entries; i++) {
+ struct ftgmac100_txdes *txdes = &priv->txdes[i];
+ struct sk_buff *skb = priv->tx_skbs[i];
+
+ if (!skb)
+ continue;
+ ftgmac100_free_tx_packet(priv, i, skb, txdes,
+ le32_to_cpu(txdes->txdes0));
+ }
+}
- /* start transmit */
- ftgmac100_txdes_set_dma_own(txdes);
- spin_unlock(&priv->tx_lock);
+static void ftgmac100_free_rings(struct ftgmac100 *priv)
+{
+ /* Free skb arrays */
+ kfree(priv->rx_skbs);
+ kfree(priv->tx_skbs);
- ftgmac100_txdma_normal_prio_start_polling(priv);
+ /* Free descriptors */
+ if (priv->rxdes)
+ dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_rxdes),
+ priv->rxdes, priv->rxdes_dma);
+ priv->rxdes = NULL;
- return NETDEV_TX_OK;
+ if (priv->txdes)
+ dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_txdes),
+ priv->txdes, priv->txdes_dma);
+ priv->txdes = NULL;
+
+ /* Free scratch packet buffer */
+ if (priv->rx_scratch)
+ dma_free_coherent(priv->dev, RX_BUF_SIZE,
+ priv->rx_scratch, priv->rx_scratch_dma);
}
-/******************************************************************************
- * internal functions (buffer)
- *****************************************************************************/
-static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes, gfp_t gfp)
+static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
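+ /* Arrays and descriptor rings are sized for the maximum queue
+ * depth, so ethtool can resize the active rings without
+ * reallocating anything.
+ */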
- struct net_device *netdev = priv->netdev;
- struct page *page;
- dma_addr_t map;
+ /* Allocate skb arrays */
+ priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
+ GFP_KERNEL);
+ if (!priv->rx_skbs)
+ return -ENOMEM;
+ priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
+ GFP_KERNEL);
+ if (!priv->tx_skbs)
+ return -ENOMEM;
- page = alloc_page(gfp);
- if (!page) {
- if (net_ratelimit())
- netdev_err(netdev, "failed to allocate rx page\n");
+ /* Allocate descriptors */
+ priv->rxdes = dma_zalloc_coherent(priv->dev,
+ MAX_RX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_rxdes),
+ &priv->rxdes_dma, GFP_KERNEL);
+ if (!priv->rxdes)
+ return -ENOMEM;
+ priv->txdes = dma_zalloc_coherent(priv->dev,
+ MAX_TX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_txdes),
+ &priv->txdes_dma, GFP_KERNEL);
+ if (!priv->txdes)
return -ENOMEM;
- }
- map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, map))) {
- if (net_ratelimit())
- netdev_err(netdev, "failed to map rx page\n");
- __free_page(page);
+ /* Allocate scratch packet buffer */
+ priv->rx_scratch = dma_alloc_coherent(priv->dev,
+ RX_BUF_SIZE,
+ &priv->rx_scratch_dma,
+ GFP_KERNEL);
+ if (!priv->rx_scratch)
return -ENOMEM;
- }
- ftgmac100_rxdes_set_page(priv, rxdes, page);
- ftgmac100_rxdes_set_dma_addr(rxdes, map);
- ftgmac100_rxdes_set_dma_own(priv, rxdes);
return 0;
}
-static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+static void ftgmac100_init_rings(struct ftgmac100 *priv)
{
+ struct ftgmac100_rxdes *rxdes = NULL;
+ struct ftgmac100_txdes *txdes = NULL;
int i;
- for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
- dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+ /* Update entries counts */
+ priv->rx_q_entries = priv->new_rx_q_entries;
+ priv->tx_q_entries = priv->new_tx_q_entries;
- if (!page)
- continue;
+ if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
+ return;
- dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
- __free_page(page);
+ /* Initialize RX ring */
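+ /* Point every descriptor at the scratch buffer first, so the MAC
+ * never DMAs into random memory before real buffers are attached.
+ */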
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ rxdes = &priv->rxdes[i];
+ rxdes->rxdes0 = 0;
+ rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
}
+ /* Mark the end of the ring */
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
- for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
- struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
- dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
-
- if (!skb)
- continue;
+ if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES))
+ return;
- dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
- kfree_skb(skb);
+ /* Initialize TX ring */
+ for (i = 0; i < priv->tx_q_entries; i++) {
+ txdes = &priv->txdes[i];
+ txdes->txdes0 = 0;
}
-
- dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
- priv->descs, priv->descs_dma_addr);
+ txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
-static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
+static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
{
int i;
- priv->descs = dma_zalloc_coherent(priv->dev,
- sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
- if (!priv->descs)
- return -ENOMEM;
-
- /* initialize RX ring */
- ftgmac100_rxdes_set_end_of_ring(priv,
- &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
-
- for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
- if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
- goto err;
+ if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
+ return -ENOMEM;
}
-
- /* initialize TX ring */
- ftgmac100_txdes_set_end_of_ring(priv,
- &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
-
-err:
- ftgmac100_free_buffers(priv);
- return -ENOMEM;
}
-/******************************************************************************
- * internal functions (mdio)
- *****************************************************************************/
static void ftgmac100_adjust_link(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
- int ier;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
- if (phydev->speed == priv->old_speed)
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
return;
- priv->old_speed = phydev->speed;
-
- ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
+ /* Print the link status if we have a link or we had one and
+ * just lost it; stay silent otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
- /* disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
- netif_stop_queue(netdev);
- ftgmac100_stop_hw(priv);
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
- netif_start_queue(netdev);
- ftgmac100_init_hw(priv);
- ftgmac100_start_hw(priv, phydev->speed);
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* re-enable interrupts */
- iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
+ /* Reset the adapter asynchronously */
+ schedule_work(&priv->reset_task);
}
-static int ftgmac100_mii_probe(struct ftgmac100 *priv)
+static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
{
struct net_device *netdev = priv->netdev;
struct phy_device *phydev;
@@ -910,19 +1064,25 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
}
phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);
+ &ftgmac100_adjust_link, intf);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
return PTR_ERR(phydev);
}
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.txt)
+ */
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->advertising = phydev->supported;
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
return 0;
}
-/******************************************************************************
- * struct mii_bus functions
- *****************************************************************************/
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -994,9 +1154,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
return -EIO;
}
-/******************************************************************************
- * struct ethtool_ops functions
- *****************************************************************************/
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -1005,175 +1162,365 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
+static void ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ memset(ering, 0, sizeof(*ering));
+ ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
+ ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
+ ering->rx_pending = priv->rx_q_entries;
+ ering->tx_pending = priv->tx_q_entries;
+}
+
+static int ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
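+ /* Ring sizes must be powers of two so that ring index wrapping
+ * can be done with a simple mask.
+ */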
+ if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
+ ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
+ ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
+ ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
+ !is_power_of_2(ering->rx_pending) ||
+ !is_power_of_2(ering->tx_pending))
+ return -EINVAL;
+
+ priv->new_rx_q_entries = ering->rx_pending;
+ priv->new_tx_q_entries = ering->tx_pending;
+ if (netif_running(netdev))
+ schedule_work(&priv->reset_task);
+
+ return 0;
+}
+
+static void ftgmac100_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ pause->autoneg = priv->aneg_pause;
+ pause->tx_pause = priv->tx_pause;
+ pause->rx_pause = priv->rx_pause;
+}
+
+static int ftgmac100_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ priv->aneg_pause = pause->autoneg;
+ priv->tx_pause = pause->tx_pause;
+ priv->rx_pause = pause->rx_pause;
+
+ if (phydev) {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pause->rx_pause) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+ }
+
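+ /* Toggling Asym_Pause: together with the Pause bit set above
+ * this advertises symmetric pause, on its own it advertises
+ * tx-only (asymmetric) pause.
+ */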
+ if (pause->tx_pause)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+ }
+ if (netif_running(netdev)) {
+ if (phydev && priv->aneg_pause)
+ phy_start_aneg(phydev);
+ else
+ ftgmac100_config_pause(priv);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ftgmac100_ethtool_ops = {
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_ringparam = ftgmac100_get_ringparam,
+ .set_ringparam = ftgmac100_set_ringparam,
+ .get_pauseparam = ftgmac100_get_pauseparam,
+ .set_pauseparam = ftgmac100_set_pauseparam,
};
-/******************************************************************************
- * interrupt handler
- *****************************************************************************/
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
struct net_device *netdev = dev_id;
struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int status, new_mask = FTGMAC100_INT_BAD;
- /* When running in NCSI mode, the interface should be ready for
- * receiving or transmitting NCSI packets before it's opened.
- */
- if (likely(priv->use_ncsi || netif_running(netdev))) {
- /* Disable interrupts for polling */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- napi_schedule(&priv->napi);
+ /* Fetch and clear interrupt bits, process abnormal ones */
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+ if (unlikely(status & FTGMAC100_INT_BAD)) {
+
+ /* RX buffer unavailable */
+ if (status & FTGMAC100_INT_NO_RXBUF)
+ netdev->stats.rx_over_errors++;
+
+ /* received packet lost due to RX FIFO full */
+ if (status & FTGMAC100_INT_RPKT_LOST)
+ netdev->stats.rx_fifo_errors++;
+
+ /* sent packet lost due to excessive TX collision */
+ if (status & FTGMAC100_INT_XPKT_LOST)
+ netdev->stats.tx_fifo_errors++;
+
+ /* AHB error -> Reset the chip */
+ if (status & FTGMAC100_INT_AHB_ERR) {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "AHB bus error ! Resetting chip.\n");
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ schedule_work(&priv->reset_task);
+ return IRQ_HANDLED;
+ }
+
+ /* We may need to restart the MAC after such errors, delay
+ * this until after we have freed some Rx buffers though
+ */
+ priv->need_mac_restart = true;
+
+ /* Disable those errors until we restart */
+ new_mask &= ~status;
}
+ /* Only enable "bad" interrupts while NAPI is on */
+ iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Schedule NAPI bh */
+ napi_schedule_irqoff(&priv->napi);
+
return IRQ_HANDLED;
}
-/******************************************************************************
- * struct napi_struct functions
- *****************************************************************************/
+static bool ftgmac100_check_rx(struct ftgmac100 *priv)
+{
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
+
+ /* Do we have a packet? */
+ return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
+}
+
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
- struct net_device *netdev = priv->netdev;
- unsigned int status;
- bool completed = true;
- int rx = 0;
+ int work_done = 0;
+ bool more;
- status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
- iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+ /* Handle TX completions */
+ if (ftgmac100_tx_buf_cleanable(priv))
+ ftgmac100_tx_complete(priv);
- if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
- /*
- * FTGMAC100_INT_RPKT_BUF:
- * RX DMA has received packets into RX buffer successfully
- *
- * FTGMAC100_INT_NO_RXBUF:
- * RX buffer unavailable
- */
- bool retry;
+ /* Handle RX packets */
+ do {
+ more = ftgmac100_rx_packet(priv, &work_done);
+ } while (more && work_done < budget);
- do {
- retry = ftgmac100_rx_packet(priv, &rx);
- } while (retry && rx < budget);
- if (retry && rx == budget)
- completed = false;
- }
+ /* The interrupt is telling us to kick the MAC back to life
+ * after an RX overflow
+ */
+ if (unlikely(priv->need_mac_restart)) {
+ ftgmac100_start_hw(priv);
- if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
- /*
- * FTGMAC100_INT_XPKT_ETH:
- * packet transmitted to ethernet successfully
- *
- * FTGMAC100_INT_XPKT_LOST:
- * packet transmitted to ethernet lost due to late
- * collision or excessive collision
- */
- ftgmac100_tx_complete(priv);
+ /* Re-enable "bad" interrupts */
+ iowrite32(FTGMAC100_INT_BAD,
+ priv->base + FTGMAC100_OFFSET_IER);
}
- if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
- FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
- if (net_ratelimit())
- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
- status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
- status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
- status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
+ /* As long as we are waiting for transmit packets to be
+ * completed we keep NAPI going
+ */
+ if (ftgmac100_tx_buf_cleanable(priv))
+ work_done = budget;
+
+ if (work_done < budget) {
+ /* We are about to re-enable all interrupts. However
+ * the HW has been latching RX/TX packet interrupts while
+ * they were masked. So we clear them first, then we need
+ * to re-check if there's something to process
+ */
+ iowrite32(FTGMAC100_INT_RXTX,
+ priv->base + FTGMAC100_OFFSET_ISR);
- if (status & FTGMAC100_INT_NO_RXBUF) {
- /* RX buffer unavailable */
- netdev->stats.rx_over_errors++;
- }
+ /* Push the above out; the read back also acts as a barrier
+ * vs. subsequent reads of the descriptors.
+ */
+ ioread32(priv->base + FTGMAC100_OFFSET_ISR);
- if (status & FTGMAC100_INT_RPKT_LOST) {
- /* received packet lost due to RX FIFO full */
- netdev->stats.rx_fifo_errors++;
- }
- }
+ /* Check RX and TX descriptors for more work to do */
+ if (ftgmac100_check_rx(priv) ||
+ ftgmac100_tx_buf_cleanable(priv))
+ return budget;
- if (completed) {
+ /* deschedule NAPI */
napi_complete(napi);
/* enable all interrupts */
- iowrite32(priv->int_mask_all,
+ iowrite32(FTGMAC100_INT_ALL,
priv->base + FTGMAC100_OFFSET_IER);
}
- return rx;
+ return work_done;
}
-/******************************************************************************
- * struct net_device_ops functions
- *****************************************************************************/
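+/* Bring the rings and hardware (back) up. ignore_alloc_err is set on
+ * the reset path, where a partially populated RX ring is better than
+ * failing the whole reset.
+ */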
-static int ftgmac100_open(struct net_device *netdev)
+static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
- struct ftgmac100 *priv = netdev_priv(netdev);
- unsigned int status;
+ int err = 0;
+
+ /* Re-init descriptors (adjust queue sizes) */
+ ftgmac100_init_rings(priv);
+
+ /* Realloc rx descriptors */
+ err = ftgmac100_alloc_rx_buffers(priv);
+ if (err && !ignore_alloc_err)
+ return err;
+
+ /* Reinit and restart HW */
+ ftgmac100_init_hw(priv);
+ ftgmac100_config_pause(priv);
+ ftgmac100_start_hw(priv);
+
+ /* Re-enable the device */
+ napi_enable(&priv->napi);
+ netif_start_queue(priv->netdev);
+
+ /* Enable all interrupts */
+ iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
+
+ return err;
+}
+
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+ struct net_device *netdev = priv->netdev;
int err;
- err = ftgmac100_alloc_buffers(priv);
+ netdev_dbg(netdev, "Resetting NIC...\n");
+
+ /* Lock the world */
+ rtnl_lock();
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+ if (priv->mii_bus)
+ mutex_lock(&priv->mii_bus->mdio_lock);
+
+ /* Check if the interface is still up */
+ if (!netif_running(netdev))
+ goto bail;
+
+ /* Stop the network stack */
+ netif_trans_update(netdev);
+ napi_disable(&priv->napi);
+ netif_tx_disable(netdev);
+
+ /* Stop and reset the MAC */
+ ftgmac100_stop_hw(priv);
+ err = ftgmac100_reset_and_config_mac(priv);
if (err) {
- netdev_err(netdev, "failed to allocate buffers\n");
- goto err_alloc;
+ /* Not much we can do ... it might come back... */
+ netdev_err(netdev, "attempting to continue...\n");
}
- err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+ /* Free all rx and tx buffers */
+ ftgmac100_free_buffers(priv);
+
+ /* Setup everything again and restart chip */
+ ftgmac100_init_all(priv, true);
+
+ netdev_dbg(netdev, "Reset done !\n");
+ bail:
+ if (priv->mii_bus)
+ mutex_unlock(&priv->mii_bus->mdio_lock);
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+ rtnl_unlock();
+}
+
+static int ftgmac100_open(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ int err;
+
+ /* Allocate ring buffers */
+ err = ftgmac100_alloc_rings(priv);
if (err) {
- netdev_err(netdev, "failed to request irq %d\n", priv->irq);
- goto err_irq;
+ netdev_err(netdev, "Failed to allocate descriptors\n");
+ return err;
}
- priv->rx_pointer = 0;
- priv->tx_clean_pointer = 0;
- priv->tx_pointer = 0;
- priv->tx_pending = 0;
+ /* When using NC-SI we force the speed to 100Mbit/s full duplex.
+ *
+ * Otherwise we leave it set to 0 (no link); the link state
+ * callback from the PHY layer will set it to something else
+ * if needed.
+ */
+ if (priv->use_ncsi) {
+ priv->cur_duplex = DUPLEX_FULL;
+ priv->cur_speed = SPEED_100;
+ } else {
+ priv->cur_duplex = 0;
+ priv->cur_speed = 0;
+ }
- err = ftgmac100_reset_hw(priv);
+ /* Reset the hardware */
+ err = ftgmac100_reset_and_config_mac(priv);
if (err)
goto err_hw;
- ftgmac100_init_hw(priv);
- ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+ /* Initialize NAPI */
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
- /* Clear stale interrupts */
- status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
- iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+ /* Grab our interrupt */
+ err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
+ goto err_irq;
+ }
- if (netdev->phydev)
+ /* Start things up */
+ err = ftgmac100_init_all(priv, false);
+ if (err) {
+ netdev_err(netdev, "Failed to allocate packet buffers\n");
+ goto err_alloc;
+ }
+
+ if (netdev->phydev) {
+ /* If we have a PHY, start polling */
phy_start(netdev->phydev);
- else if (priv->use_ncsi)
+ } else if (priv->use_ncsi) {
+ /* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
- napi_enable(&priv->napi);
- netif_start_queue(netdev);
-
- /* enable all interrupts */
- iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Start the NCSI device */
- if (priv->use_ncsi) {
+ /* Start the NCSI device */
err = ncsi_start_dev(priv->ndev);
if (err)
goto err_ncsi;
}
- priv->enabled = true;
-
return 0;
-err_ncsi:
+ err_ncsi:
napi_disable(&priv->napi);
netif_stop_queue(netdev);
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-err_hw:
- free_irq(priv->irq, netdev);
-err_irq:
+ err_alloc:
ftgmac100_free_buffers(priv);
-err_alloc:
+ free_irq(netdev->irq, netdev);
+ err_irq:
+ netif_napi_del(&priv->napi);
+ err_hw:
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ ftgmac100_free_rings(priv);
return err;
}
@@ -1181,64 +1528,87 @@ static int ftgmac100_stop(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- if (!priv->enabled)
- return 0;
+ /* Note about the reset task: We are called with the rtnl lock
+ * held, so we are synchronized against the core of the reset
+ * task. We must not try to synchronously cancel it, otherwise
+ * we can deadlock. But since it will test for netif_running()
+ * which has already been cleared by the net core, we don't
+ * have anything special to do.
+ */
/* disable all interrupts */
- priv->enabled = false;
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
else if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
- free_irq(priv->irq, netdev);
+ free_irq(netdev->irq, netdev);
ftgmac100_free_buffers(priv);
+ ftgmac100_free_rings(priv);
return 0;
}
-static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+/* optional */
+static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void ftgmac100_tx_timeout(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- dma_addr_t map;
- if (unlikely(skb->len > MAX_PKT_SIZE)) {
- if (net_ratelimit())
- netdev_dbg(netdev, "tx packet too big\n");
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- netdev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+ /* Do the reset outside of interrupt context */
+ schedule_work(&priv->reset_task);
+}
- map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, map))) {
- /* drop packet */
- if (net_ratelimit())
- netdev_err(netdev, "map socket buffer failed\n");
+static int ftgmac100_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
- netdev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
+ if (!netif_running(netdev))
+ return 0;
+
+ /* Update the vlan filtering bit */
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ u32 maccr;
+
+ maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+ if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ maccr |= FTGMAC100_MACCR_RM_VLAN;
+ else
+ maccr &= ~FTGMAC100_MACCR_RM_VLAN;
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
- return ftgmac100_xmit(priv, skb, map);
+ return 0;
}
-/* optional */
-static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ftgmac100_poll_controller(struct net_device *netdev)
{
- if (!netdev->phydev)
- return -ENXIO;
+ unsigned long flags;
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+ local_irq_save(flags);
+ ftgmac100_interrupt(netdev->irq, netdev);
+ local_irq_restore(flags);
}
+#endif
static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_open = ftgmac100_open,
@@ -1247,12 +1617,20 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = ftgmac100_do_ioctl,
+ .ndo_tx_timeout = ftgmac100_tx_timeout,
+ .ndo_set_rx_mode = ftgmac100_set_rx_mode,
+ .ndo_set_features = ftgmac100_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ftgmac100_poll_controller,
+#endif
};
static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
+ int phy_intf = PHY_INTERFACE_MODE_RGMII;
+ struct device_node *np = pdev->dev.of_node;
int i, err = 0;
u32 reg;
@@ -1261,14 +1639,46 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
if (!priv->mii_bus)
return -EIO;
- if (of_machine_is_compatible("aspeed,ast2400") ||
- of_machine_is_compatible("aspeed,ast2500")) {
+ if (priv->is_aspeed) {
/* This driver supports the old MDIO interface */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
};
+ /* Get PHY mode from device-tree */
+ if (np) {
+ /* Default to RGMII. It's a gigabit part after all */
+ phy_intf = of_get_phy_mode(np);
+ if (phy_intf < 0)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors, so I'm going to just let them through for
+ * now. Note that this is only a warning, in case for some
+ * obscure reason the DT really means to lie about it or it's
+ * a newer part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed &&
+ phy_intf != PHY_INTERFACE_MODE_RMII &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+ }
+
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1285,7 +1695,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
goto err_register_mdiobus;
}
- err = ftgmac100_mii_probe(priv);
+ err = ftgmac100_mii_probe(priv, phy_intf);
if (err) {
dev_err(priv->dev, "MII Probe failed!\n");
goto err_mii_probe;
@@ -1321,15 +1731,13 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd->link_up ? "up" : "down");
}
-/******************************************************************************
- * struct platform_driver functions
- *****************************************************************************/
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
int irq;
struct net_device *netdev;
struct ftgmac100 *priv;
+ struct device_node *np;
int err = 0;
if (!pdev)
@@ -1354,6 +1762,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
platform_set_drvdata(pdev, netdev);
@@ -1361,11 +1770,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dev = &pdev->dev;
-
- spin_lock_init(&priv->tx_lock);
-
- /* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1383,29 +1788,28 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_ioremap;
}
- priv->irq = irq;
+ netdev->irq = irq;
- /* MAC address from chip or random one */
- ftgmac100_setup_mac(priv);
+ /* Enable pause */
+ priv->tx_pause = true;
+ priv->rx_pause = true;
+ priv->aneg_pause = true;
- priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST |
- FTGMAC100_INT_XPKT_ETH |
- FTGMAC100_INT_XPKT_LOST |
- FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_RPKT_BUF |
- FTGMAC100_INT_NO_RXBUF);
+ /* MAC address from chip or random one */
+ ftgmac100_initial_mac(priv);
- if (of_machine_is_compatible("aspeed,ast2400") ||
- of_machine_is_compatible("aspeed,ast2500")) {
+ np = pdev->dev.of_node;
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
+ priv->is_aspeed = true;
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
}
- if (pdev->dev.of_node &&
- of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
+ if (np && of_get_property(np, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
dev_err(&pdev->dev, "NCSI stack not enabled\n");
goto err_ncsi_dev;
@@ -1423,15 +1827,21 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_setup_mdio;
}
- /* We have to disable on-chip IP checksum functionality
- * when NCSI is enabled on the interface. It doesn't work
- * in that case.
- */
- netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
- if (priv->use_ncsi &&
- of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL))
- netdev->features &= ~NETIF_F_IP_CSUM;
+ /* Default ring sizes */
+ priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
+ priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
+ /* Base feature set */
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ /* AST2400 doesn't have working HW checksum generation */
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+ if (np && of_get_property(np, "no-hw-checksum", NULL))
+ netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+ netdev->features |= netdev->hw_features;
/* register network device */
err = register_netdev(netdev);
@@ -1440,7 +1850,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_register_netdev;
}
- netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+ netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
return 0;
@@ -1467,6 +1877,12 @@ static int ftgmac100_remove(struct platform_device *pdev)
priv = netdev_priv(netdev);
unregister_netdev(netdev);
+
+ /* There's a small chance the reset task will have been re-queued
+ * during stop; make sure it's gone before we free the structure.
+ */
+ cancel_work_sync(&priv->reset_task);
+
ftgmac100_destroy_mdio(netdev);
iounmap(priv->base);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index a7ce0ac8858a..0653d8176e6a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -86,6 +86,20 @@
#define FTGMAC100_INT_PHYSTS_CHG (1 << 9)
#define FTGMAC100_INT_NO_HPTXBUF (1 << 10)
+/* Interrupts we care about in NAPI mode */
+#define FTGMAC100_INT_BAD (FTGMAC100_INT_RPKT_LOST | \
+ FTGMAC100_INT_XPKT_LOST | \
+ FTGMAC100_INT_AHB_ERR | \
+ FTGMAC100_INT_NO_RXBUF)
+
+/* Normal RX/TX interrupts, enabled when NAPI off */
+#define FTGMAC100_INT_RXTX (FTGMAC100_INT_XPKT_ETH | \
+ FTGMAC100_INT_RPKT_BUF)
+
+/* All the interrupts we care about */
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+ FTGMAC100_INT_BAD)
+
/*
* Interrupt timer control register
*/
@@ -185,13 +199,20 @@
#define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff)
/*
+ * Flow control register
+ */
+#define FTGMAC100_FCR_FC_EN (1 << 0)
+#define FTGMAC100_FCR_FCTHR_EN (1 << 2)
+#define FTGMAC100_FCR_PAUSE_TIME(x) (((x) & 0xffff) << 16)
+
+/*
* Transmit descriptor, aligned to 16 bytes
*/
struct ftgmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* not used by HW */
- unsigned int txdes3; /* TXBUF_BADR */
+ __le32 txdes0; /* Control & status bits */
+ __le32 txdes1; /* Irq, checksum and vlan control */
+ __le32 txdes2; /* Reserved */
+ __le32 txdes3; /* DMA buffer address */
} __attribute__ ((aligned(16)));
#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
@@ -213,10 +234,10 @@ struct ftgmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftgmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* not used by HW */
- unsigned int rxdes3; /* RXBUF_BADR */
+ __le32 rxdes0; /* Control & status bits */
+ __le32 rxdes1; /* Checksum and vlan status */
+ __le32 rxdes2; /* length/type on AST2500 */
+ __le32 rxdes3; /* DMA buffer address */
} __attribute__ ((aligned(16)));
#define FTGMAC100_RXDES0_VDBC 0x3fff
@@ -234,6 +255,14 @@ struct ftgmac100_rxdes {
#define FTGMAC100_RXDES0_FRS (1 << 29)
#define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31)
+/* Errors we care about for dropping packets */
+#define RXDES0_ANY_ERROR ( \
+ FTGMAC100_RXDES0_RX_ERR | \
+ FTGMAC100_RXDES0_CRC_ERR | \
+ FTGMAC100_RXDES0_FTL | \
+ FTGMAC100_RXDES0_RUNT | \
+ FTGMAC100_RXDES0_RX_ODD_NB)
+
#define FTGMAC100_RXDES1_VLANTAG_CI 0xffff
#define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20)
#define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e2ca107f9d94..9a520e4f0df9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
* For conformity, we'll still declare GSO explicitly.
*/
net_dev->features |= NETIF_F_GSO;
+ net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* we do not want shared skbs on TX */
@@ -334,6 +342,45 @@ static void dpaa_get_stats64(struct net_device *net_dev,
}
}
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ u8 num_tc;
+ int i;
+
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
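+ /* The traffic class to queue mapping is handled fully in hardware */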
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = tc->mqprio->num_tc;
+
+ if (num_tc == priv->num_tc)
+ return 0;
+
+ if (!num_tc) {
+ netdev_reset_tc(net_dev);
+ goto out;
+ }
+
+ if (num_tc > DPAA_TC_NUM) {
+ netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+ DPAA_TC_NUM);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(net_dev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+ i * DPAA_TC_TXQ_NUM);
+
+out:
+ priv->num_tc = num_tc ? : 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ return 0;
+}
+
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
struct platform_device *of_dev;
@@ -557,16 +604,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
/* Use multiple WQs for FQ assignment:
* - Tx Confirmation queues go to WQ1.
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- * to be scheduled, in case there are many more FQs in WQ3).
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- * Rx and Tx traffic).
+ * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ6).
+ * - Rx Default goes to WQ6.
+ * - Tx queues go to different WQs depending on their priority. Equal
+ * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ * WQ0 (highest priority).
* This ensures that Tx-confirmed buffers are timely released. In particular,
* it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
* are greatly outnumbered by other FQs in the system, while
* dequeue scheduling is round-robin.
*/
-static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
switch (fq->fq_type) {
case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +624,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
break;
case FQ_TYPE_RX_ERROR:
case FQ_TYPE_TX_ERROR:
- fq->wq = 2;
+ fq->wq = 5;
break;
case FQ_TYPE_RX_DEFAULT:
+ fq->wq = 6;
+ break;
case FQ_TYPE_TX:
- fq->wq = 3;
+ switch (idx / DPAA_TC_TXQ_NUM) {
+ case 0:
+ /* Low priority (best effort) */
+ fq->wq = 6;
+ break;
+ case 1:
+ /* Medium priority */
+ fq->wq = 2;
+ break;
+ case 2:
+ /* High priority */
+ fq->wq = 1;
+ break;
+ case 3:
+ /* Very high priority */
+ fq->wq = 0;
+ break;
+ default:
+ WARN(1, "Too many TX FQs: more than %d!\n",
+ DPAA_ETH_TXQ_NUM);
+ }
break;
default:
WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +678,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
}
for (i = 0; i < count; i++)
- dpaa_assign_wq(dpaa_fq + i);
+ dpaa_assign_wq(dpaa_fq + i, i);
return dpaa_fq;
}
@@ -903,7 +974,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* Tx Confirmation FQs.
*/
if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
/* FQ placement */
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
@@ -985,7 +1056,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
- initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+ QM_FQCTRL_CTXASTASHING);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1055,9 +1127,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
return err;
}
-static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
- struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_params params;
@@ -1076,23 +1148,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
err = fman_port_config(port, &params);
- if (err)
+ if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
+ if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
+ return err;
+ }
err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
}
-static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
- struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+ size_t count, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
@@ -1120,32 +1198,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}
err = fman_port_config(port, &params);
- if (err)
+ if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
+ if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
+ return err;
+ }
err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
}
-static void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpaa_buffer_layout *buf_layout,
- struct device *dev)
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp **bps, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
{
struct fman_port *rxport = mac_dev->port[RX];
struct fman_port *txport = mac_dev->port[TX];
+ int err;
+
+ err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ if (err)
+ return err;
+
+ err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
- dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
- port_fqs->tx_defq, &buf_layout[TX]);
- dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
- port_fqs->rx_defq, &buf_layout[RX]);
+ return err;
}
static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1616,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
return skb;
}
+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+ /* The parser has run and performed L4 checksum validation.
+ * We know there were no parser errors (and implicitly no
+ * L4 csum error), otherwise we wouldn't be here.
+ */
+ if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+ (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+ return CHECKSUM_UNNECESSARY;
+
+ /* We're here because either the parser didn't run or the L4 checksum
+ * was not verified. This may include the case of a UDP frame with
+ * checksum zero or an L4 proto other than TCP/UDP
+ */
+ return CHECKSUM_NONE;
+}
+
/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
* accommodate the shared info area of the skb.
@@ -1556,7 +1663,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = rx_csum_offload(priv, fd);
return skb;
@@ -1616,7 +1723,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
if (WARN_ON(unlikely(!skb)))
goto free_buffers;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = rx_csum_offload(priv, fd);
/* Make sure forwarded skbs will have enough space
* on Tx, if extra headers are added.
@@ -2093,7 +2200,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
dma_addr_t addr = qm_fd_addr(fd);
enum qm_fd_format fd_format;
struct net_device *net_dev;
- u32 fd_status = fd->status;
+ u32 fd_status;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
unsigned int skb_len;
@@ -2350,6 +2457,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
.ndo_do_ioctl = dpaa_ioctl,
+ .ndo_setup_tc = dpaa_setup_tc,
};
static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2732,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
- &priv->buf_layout[0], dev);
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ &priv->buf_layout[0], dev);
+ if (err)
+ goto init_ports_failed;
priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
if (!priv->percpu_priv) {
@@ -2638,6 +2748,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
memset(percpu_priv, 0, sizeof(*percpu_priv));
}
+ priv->num_tc = 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
if (err < 0)
@@ -2658,6 +2771,7 @@ netdev_init_failed:
napi_add_failed:
dpaa_napi_del(net_dev);
alloc_percpu_failed:
+init_ports_failed:
dpaa_fq_free(dev, &priv->dpaa_fq_list);
fq_alloc_failed:
qman_delete_cgr_safe(&priv->ingress_cgr);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 1f9aebf3f3c5..9941a7866ebe 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -39,7 +39,12 @@
#include "mac.h"
#include "dpaa_eth_trace.h"
-#define DPAA_ETH_TXQ_NUM NR_CPUS
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM 4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
#define DPAA_BPS_NUM 3 /* number of bpools per interface */
@@ -152,6 +157,7 @@ struct dpaa_priv {
u16 channel;
struct list_head dpaa_fq_list;
+ u8 num_tc;
u32 msg_enable; /* net_device message level */
struct {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 91a16641e851..a92bf94f8e94 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -117,8 +117,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx6ul-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE,
}, {
/* sentinel */
}
@@ -235,14 +236,14 @@ static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
struct bufdesc_prop *bd)
{
return (bdp >= bd->last) ? bd->base
- : (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
+ : (struct bufdesc *)(((void *)bdp) + bd->dsize);
}
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
struct bufdesc_prop *bd)
{
return (bdp <= bd->base) ? bd->last
- : (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
+ : (struct bufdesc *)(((void *)bdp) - bd->dsize);
}
static int fec_enet_get_bd_index(struct bufdesc *bdp,
@@ -1266,7 +1267,7 @@ skb_done:
}
}
- /* ERR006538: Keep the transmitter going */
+ /* ERR006358: Keep the transmitter going */
if (bdp != txq->bd.cur &&
readl(txq->bd.reg_desc_active) == 0)
writel(0, txq->bd.reg_desc_active);
@@ -2651,7 +2652,7 @@ static void fec_enet_free_queue(struct net_device *ndev)
for (i = 0; i < fep->num_tx_queues; i++)
if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
txq = fep->tx_queue[i];
- dma_free_coherent(NULL,
+ dma_free_coherent(&fep->pdev->dev,
txq->bd.ring_size * TSO_HEADER_SIZE,
txq->tso_hdrs,
txq->tso_hdrs_dma);
@@ -2685,7 +2686,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
txq->tx_wake_threshold =
(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
- txq->tso_hdrs = dma_alloc_coherent(NULL,
+ txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
txq->bd.ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma,
GFP_KERNEL);
@@ -3187,7 +3188,7 @@ static int fec_enet_init(struct net_device *ndev)
}
#ifdef CONFIG_OF
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
bool active_high = false;
@@ -3195,16 +3196,18 @@ static void fec_reset_phy(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
if (!np)
- return;
+ return 0;
- of_property_read_u32(np, "phy-reset-duration", &msec);
+ err = of_property_read_u32(np, "phy-reset-duration", &msec);
/* A sane reset duration should not be longer than 1s */
- if (msec > 1000)
+ if (!err && msec > 1000)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (!gpio_is_valid(phy_reset))
- return;
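+ /* Defer probing until the reset GPIO's controller is available;
+ * any other invalid GPIO simply means there is no reset line.
+ */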
+ if (phy_reset == -EPROBE_DEFER)
+ return phy_reset;
+ else if (!gpio_is_valid(phy_reset))
+ return 0;
active_high = of_property_read_bool(np, "phy-reset-active-high");
@@ -3213,7 +3216,7 @@ static void fec_reset_phy(struct platform_device *pdev)
"phy-reset");
if (err) {
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return;
+ return err;
}
if (msec > 20)
@@ -3222,14 +3225,17 @@ static void fec_reset_phy(struct platform_device *pdev)
usleep_range(msec * 1000, msec * 1000 + 1000);
gpio_set_value_cansleep(phy_reset, !active_high);
+
+ return 0;
}
#else /* CONFIG_OF */
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
* by machine code.
*/
+ return 0;
}
#endif /* CONFIG_OF */
@@ -3400,6 +3406,7 @@ fec_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to enable phy regulator: %d\n", ret);
+ clk_disable_unprepare(fep->clk_ipg);
goto failed_regulator;
}
} else {
@@ -3412,7 +3419,9 @@ fec_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- fec_reset_phy(pdev);
+ ret = fec_reset_phy(pdev);
+ if (ret)
+ goto failed_reset;
if (fep->bufdesc_ex)
fec_ptp_init(pdev);
@@ -3473,8 +3482,10 @@ failed_init:
fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+failed_reset:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
failed_regulator:
- clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index f60845f0c6ca..4aefe2438969 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -59,6 +59,7 @@
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
+#define HWP_OFFSET 0x000C7000
#define CGP_OFFSET 0x000DB000
/* Exceptions bit map */
@@ -218,6 +219,9 @@
#define QMI_GS_HALT_NOT_BUSY 0x00000002
+/* HWP defines */
+#define HWP_RPIMAC_PEN 0x00000001
+
/* IRAM defines */
#define IRAM_IADD_AIE 0x80000000
#define IRAM_READY 0x80000000
@@ -475,6 +479,12 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};
+struct fman_hwp_regs {
+ u32 res0000[0x844 / 4]; /* 0x000..0x843 */
+ u32 fmprrpimac; /* FM Parser Internal memory access control */
+ u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */
+};
+
/* Structure that holds current FMan state.
* Used for saving run time information.
*/
@@ -606,6 +616,7 @@ struct fman {
struct fman_bmi_regs __iomem *bmi_regs;
struct fman_qmi_regs __iomem *qmi_regs;
struct fman_dma_regs __iomem *dma_regs;
+ struct fman_hwp_regs __iomem *hwp_regs;
fman_exceptions_cb *exception_cb;
fman_bus_error_cb *bus_error_cb;
/* Spinlock for FMan use */
@@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}
+static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
+{
+ /* enable HW Parser */
+ iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
+}
+
static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
@@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
state->max_num_of_open_dmas = 32;
state->fm_port_num_of_cg = 256;
state->num_of_rx_ports = 6;
- state->total_fifo_size = 122 * 1024;
+ state->total_fifo_size = 136 * 1024;
break;
case 2:
@@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman)
fman->bmi_regs = base_addr + BMI_OFFSET;
fman->qmi_regs = base_addr + QMI_OFFSET;
fman->dma_regs = base_addr + DMA_OFFSET;
+ fman->hwp_regs = base_addr + HWP_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
@@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);
+ /* Init HW Parser */
+ hwp_init(fman->hwp_regs);
+
err = enable(fman, cfg);
if (err != 0)
return err;
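
The reserved arrays in the new struct fman_hwp_regs exist only to place fmprrpimac at byte offset 0x844 within the 4 KiB HWP register block. A standalone check of that layout trick, mirroring the struct from the hunk above:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hwp_regs {
	uint32_t res0000[0x844 / 4];		/* 0x000..0x843 */
	uint32_t fmprrpimac;			/* lands at 0x844 */
	uint32_t res[(0x1000 - 0x848) / 4];	/* pad the block to 0x1000 */
};

int main(void)
{
	printf("offsetof(fmprrpimac) = 0x%zx\n",
	       offsetof(struct hwp_regs, fmprrpimac));	/* 0x844 */
	printf("sizeof(struct hwp_regs) = 0x%zx\n",
	       sizeof(struct hwp_regs));		/* 0x1000 */
	return 0;
}
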
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 57aae8d17d77..f53e1473dbcc 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -134,14 +134,14 @@ enum fman_exceptions {
struct fman_prs_result {
u8 lpid; /* Logical port id */
u8 shimr; /* Shim header result */
- u16 l2r; /* Layer 2 result */
- u16 l3r; /* Layer 3 result */
+ __be16 l2r; /* Layer 2 result */
+ __be16 l3r; /* Layer 3 result */
u8 l4r; /* Layer 4 result */
u8 cplan; /* Classification plan id */
- u16 nxthdr; /* Next Header */
- u16 cksum; /* Running-sum */
+ __be16 nxthdr; /* Next Header */
+ __be16 cksum; /* Running-sum */
/* Flags&fragment-offset field of the last IP-header */
- u16 flags_frag_off;
+ __be16 flags_frag_off;
/* Routing type field of a IPV6 routing extension header */
u8 route_type;
/* Routing Extension Header Present; last bit is IP valid */
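
Retyping these fields as __be16 records that the FMan writes them big-endian, so every CPU access needs be16_to_cpu()/cpu_to_be16() and sparse can flag a missed conversion. A standalone sketch of the conversion, assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>

/* assumes a little-endian host; the kernel helper handles both */
static uint16_t be16_to_host(uint16_t be)
{
	return (uint16_t)((be >> 8) | (be << 8));
}

int main(void)
{
	uint16_t nxthdr_raw = 0x0608;	/* 0x0806 (ARP) as the FMan stores it */

	printf("nxthdr = 0x%04x\n", be16_to_host(nxthdr_raw));
	return 0;
}
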
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 84ea130eed36..98bba10fc38c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -381,6 +381,9 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
/* check RGMII support */
if (iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ iface == PHY_INTERFACE_MODE_RGMII_TXID ||
iface == PHY_INTERFACE_MODE_RMII)
if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
return -EINVAL;
@@ -390,7 +393,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
return -EINVAL;
- is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+ is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ iface == PHY_INTERFACE_MODE_RGMII_TXID;
is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
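
The same four-way RGMII test now appears twice in init() (and again in fman_memac.c below); later kernels add phy_interface_mode_is_rgmii() for exactly this. A standalone model of such a predicate, with a stand-in enum for the kernel's phy_interface_t:

#include <stdbool.h>
#include <stdio.h>

enum phy_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID, RMII, SGMII };

static bool mode_is_rgmii(enum phy_mode m)
{
	/* the four variants differ only in which side adds the clock delay */
	return m == RGMII || m == RGMII_ID ||
	       m == RGMII_RXID || m == RGMII_TXID;
}

int main(void)
{
	printf("RGMII_TXID: %d, RMII: %d\n",
	       mode_is_rgmii(RGMII_TXID), mode_is_rgmii(RMII));
	return 0;
}
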
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index cd6a53eaf161..c0296880feba 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -443,7 +443,10 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
break;
default:
tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII)
+ if (phy_if == PHY_INTERFACE_MODE_RGMII ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
}
iowrite32be(tmp, &regs->if_mode);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 173d8e0fd716..c4a66469a907 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -36,6 +36,7 @@
#include "fman_mac.h"
#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 9f3bb50a2365..57bf44fa16a1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -62,6 +62,7 @@
#define BMI_PORT_REGS_OFFSET 0
#define QMI_PORT_REGS_OFFSET 0x400
+#define HWP_PORT_REGS_OFFSET 0x800
/* Default values */
#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
@@ -182,7 +183,7 @@
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
-
+#define NIA_ENG_HWP 0x00440000
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -317,6 +318,19 @@ struct fman_port_qmi_regs {
u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
};
+#define HWP_HXS_COUNT 16
+#define HWP_HXS_PHE_REPORT 0x00000800
+#define HWP_HXS_PCAC_PSTAT 0x00000100
+#define HWP_HXS_PCAC_PSTOP 0x00000001
+struct fman_port_hwp_regs {
+ struct {
+ u32 ssa; /* Soft Sequence Attachment */
+ u32 lcv; /* Line-up Enable Confirmation Mask */
+ } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
+ u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
+ u32 fmpr_pcac; /* Configuration Access Control */
+};
+
/* QMI dequeue prefetch modes */
enum fman_port_deq_prefetch {
FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -436,6 +450,7 @@ struct fman_port {
union fman_port_bmi_regs __iomem *bmi_regs;
struct fman_port_qmi_regs __iomem *qmi_regs;
+ struct fman_port_hwp_regs __iomem *hwp_regs;
struct fman_sp_buffer_offsets buffer_offsets;
@@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port)
/* NIA */
tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
- tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ tmp |= NIA_ENG_HWP;
iowrite32be(tmp, &regs->fmbm_rfne);
+ /* Parser Next Engine NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
+
/* Enqueue NIA */
iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
@@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port)
return 0;
}
+static void stop_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (cnt < 0)
+ pr_err("Timeout stopping HW Parser\n");
+}
+
+static void start_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(0, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (cnt < 0)
+ pr_err("Timeout starting HW Parser\n");
+}
+
+static void init_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int i;
+
+ stop_port_hwp(port);
+
+ for (i = 0; i < HWP_HXS_COUNT; i++) {
+ /* enable HXS error reporting into FD[STATUS] PHE */
+ iowrite32be(0x00000000, &regs->pmda[i].ssa);
+ iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+ }
+
+ start_port_hwp(port);
+}
+
static int init(struct fman_port *port)
{
int err;
@@ -673,6 +735,8 @@ static int init(struct fman_port *port)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
err = init_bmi_rx(port);
+ if (!err)
+ init_hwp(port);
break;
case FMAN_PORT_TYPE_TX:
err = init_bmi_tx(port);
@@ -686,7 +750,8 @@ static int init(struct fman_port *port)
/* Init QMI registers */
err = init_qmi(port);
- return err;
+ if (err)
+ return err;
return 0;
}
@@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* Allocate the FM driver's parameters structure */
port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
if (!port->cfg)
- goto err_params;
+ return -EINVAL;
/* Initialize FM port parameters which will be kept by the driver */
port->port_type = port->dts_params.type;
@@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* set memory map pointers */
port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+ port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
/* resource distribution. */
@@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
err_port_cfg:
kfree(port->cfg);
-err_params:
- kfree(port);
return -EINVAL;
}
EXPORT_SYMBOL(fman_port_config);
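
stop_port_hwp() and start_port_hwp() use the bounded-poll idiom "while (cnt-- > 0 && busy)". After a genuine timeout the post-decrement leaves cnt at -1, never 0, which is why the timeout tests above check "cnt < 0" rather than "!cnt". A standalone demonstration:

#include <stdio.h>

/* returns 0 if the condition cleared in time, -1 on timeout */
static int poll(int stays_busy_for)
{
	int cnt = 100;

	while (cnt-- > 0 && stays_busy_for-- > 0)
		;	/* in the driver: udelay(10) between register reads */

	return cnt < 0 ? -1 : 0;
}

int main(void)
{
	printf("clears quickly: %d\n", poll(5));	/* 0 */
	printf("never clears: %d\n", poll(1000));	/* -1 */
	return 0;
}
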
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index db9c0bcf54cd..1fc27c97e3b2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -38,12 +38,6 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_8xx
-#include <asm/8xx_immap.h>
-#include <asm/pgtable.h>
-#include <asm/cpm1.h>
-#endif
-
#include "fs_enet.h"
#include "fec.h"
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 96d44cf44fe0..64300ac13e02 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -37,12 +37,6 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_8xx
-#include <asm/8xx_immap.h>
-#include <asm/pgtable.h>
-#include <asm/cpm1.h>
-#endif
-
#include "fs_enet.h"
/*************************************************/
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index b6ed818f78ff..9d9b6e6dd988 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -9,9 +9,9 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-
#include "hnae.h"
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
@@ -57,11 +57,15 @@ static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
+ if (unlikely(!cb->priv))
+ return;
+
if (cb->type == DESC_TYPE_SKB)
dev_kfree_skb_any((struct sk_buff *)cb->priv);
else if (unlikely(is_rx_ring(ring)))
put_page((struct page *)cb->priv);
- memset(cb, 0, sizeof(*cb));
+
+ cb->priv = NULL;
}
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
@@ -197,6 +201,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring->q = q;
ring->flags = flags;
+ spin_lock_init(&ring->lock);
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
/* not matter for tx or rx ring, the ntc and ntc start from 0 */
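
The reworked hnae_free_buffer() treats cb->priv as the ownership marker: bail out if it is already NULL and clear it after release, so a repeated free is a harmless no-op. A standalone sketch of that guard:

#include <stdlib.h>

struct desc_cb {
	void *priv;	/* buffer ownership marker */
};

static void free_buffer(struct desc_cb *cb)
{
	if (!cb->priv)		/* already freed or never allocated */
		return;
	free(cb->priv);
	cb->priv = NULL;	/* make a second call a no-op */
}

int main(void)
{
	struct desc_cb cb = { .priv = malloc(32) };

	free_buffer(&cb);
	free_buffer(&cb);	/* safe: returns early */
	return 0;
}
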
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 8016854796fb..04211ac73b36 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -67,6 +67,8 @@ do { \
#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
#define AE_NAME_SIZE 16
+#define BD_SIZE_2048_MAX_MTU 6000
+
/* some said the RX and TX RCB format should not be the same in the future. But
* it is the same now...
*/
@@ -101,7 +103,6 @@ enum hnae_led_state {
#define HNS_RX_FLAG_L4ID_TCP 0x1
#define HNS_RX_FLAG_L4ID_SCTP 0x3
-
#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
#define HNS_TXD_BUFNUM_S 8
@@ -273,6 +274,9 @@ struct hnae_ring {
/* statistic */
struct ring_stats stats;
+ /* ring lock for poll one */
+ spinlock_t lock;
+
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
@@ -483,11 +487,11 @@ struct hnae_ae_ops {
u32 auto_neg, u32 rx_en, u32 tx_en);
void (*get_coalesce_usecs)(struct hnae_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
- void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
- u32 *tx_frames, u32 *rx_frames);
+ void (*get_max_coalesced_frames)(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
- u32 coalesce_frames);
+ u32 tx_frames, u32 rx_frames);
void (*get_coalesce_range)(struct hnae_handle *handle,
u32 *tx_frames_low, u32 *rx_frames_low,
u32 *tx_frames_high, u32 *rx_frames_high,
@@ -646,6 +650,41 @@ static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
+/* when the buffer size is changed, the buffer descriptors must be reinitialized */
+static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++)
+ ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+/* when the buffer size is changed, the page offsets must be reinitialized */
+static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++) {
+ ring->desc_cb[j].page_offset = 0;
+ if (ring->desc[j].addr !=
+ cpu_to_le64(ring->desc_cb[j].dma))
+ ring->desc[j].addr =
+ cpu_to_le64(ring->desc_cb[j].dma);
+ }
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
#define hnae_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 0a9cdf00b31a..ff864a187d5a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -267,8 +267,32 @@ static int hns_ae_clr_multicast(struct hnae_handle *handle)
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct hnae_queue *q;
+ u32 rx_buf_size;
+ int i, ret;
+
+ /* when buf_size is 2048, the max MTU is 6K because the rx ring max BD num is 3. */
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+ if (new_mtu <= BD_SIZE_2048_MAX_MTU)
+ rx_buf_size = 2048;
+ else
+ rx_buf_size = 4096;
+ } else {
+ rx_buf_size = mac_cb->dsaf_dev->buf_size;
+ }
+
+ ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);
- return hns_mac_set_mtu(mac_cb, new_mtu);
+ if (!ret) {
+ /* reinit ring buf_size */
+ for (i = 0; i < handle->q_num; i++) {
+ q = handle->qs[i];
+ q->rx_ring.buf_size = rx_buf_size;
+ hns_rcb_set_rx_ring_bs(q, rx_buf_size);
+ }
+ }
+
+ return ret;
}
static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
@@ -463,15 +487,21 @@ static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
ring_pair->port_id_in_comm);
}
-static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
- u32 *tx_frames, u32 *rx_frames)
+static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames)
{
struct ring_pair_cb *ring_pair =
container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
- ring_pair->port_id_in_comm);
- *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames = hns_rcb_get_rx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ else
+ *tx_frames = hns_rcb_get_tx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ *rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
ring_pair->port_id_in_comm);
}
@@ -485,15 +515,34 @@ static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
}
-static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
- u32 coalesce_frames)
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+ u32 tx_frames, u32 rx_frames)
{
+ int ret;
struct ring_pair_cb *ring_pair =
container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- return hns_rcb_set_coalesced_frames(
- ring_pair->rcb_common,
- ring_pair->port_id_in_comm, coalesce_frames);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG) {
+ if (tx_frames != rx_frames)
+ return -EINVAL;
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ } else {
+ if (tx_frames != 1)
+ return -EINVAL;
+ ret = hns_rcb_set_tx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, tx_frames);
+ if (ret)
+ return ret;
+
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ }
}
static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
@@ -504,20 +553,27 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
{
struct dsaf_device *dsaf_dev;
+ assert(handle);
+
dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
- *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
- *tx_frames_high =
- (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
- HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
- *rx_frames_high =
- (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
- HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
- *tx_usecs_low = 0;
- *rx_usecs_low = 0;
- *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
- *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+ *tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
+ *rx_frames_low = HNS_RCB_RX_FRAMES_LOW;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
+ HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ else
+ *tx_frames_high = 1;
+
+ *rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
+ HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = HNS_RCB_TX_USECS_LOW;
+ *rx_usecs_low = HNS_RCB_RX_USECS_LOW;
+ *tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
+ *rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
void hns_ae_update_stats(struct hnae_handle *handle,
@@ -802,8 +858,9 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
/* update the current hash->queue mappings from the shadow RSS table */
- memcpy(indir, ppe_cb->rss_indir_table,
- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+ if (indir)
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
return 0;
}
@@ -814,15 +871,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
/* set the RSS Hash Key if specififed by the user */
- if (key)
- hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
+ if (key) {
+ memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+ }
- /* update the shadow RSS table with user specified qids */
- memcpy(ppe_cb->rss_indir_table, indir,
- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+ if (indir) {
+ /* update the shadow RSS table with user specified qids */
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
- /* now update the hardware */
- hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ /* now update the hardware */
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
return 0;
}
@@ -846,7 +907,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_autoneg = hns_ae_get_autoneg,
.set_pauseparam = hns_ae_set_pauseparam,
.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
- .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+ .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
.set_coalesce_frames = hns_ae_set_coalesce_frames,
.get_coalesce_range = hns_ae_get_coalesce_range,
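
The new set_coalesce_frames contract is version-dependent: v1 hardware (and debug ports) has a single frame counter, so tx and rx must match, while v2 hardware fixes the tx waterline at 1 and only rx is tunable. A standalone model of that policy, not the driver code itself:

#include <stdbool.h>
#include <stdio.h>

/* returns 0 on an acceptable setting, -1 otherwise */
static int set_coalesce(bool ver1_or_debug, unsigned int tx, unsigned int rx)
{
	if (ver1_or_debug)
		return tx == rx ? 0 : -1;	/* one knob drives both */
	return tx == 1 ? 0 : -1;		/* tx waterline is fixed */
}

int main(void)
{
	printf("%d %d %d\n",
	       set_coalesce(true, 50, 50),	/* 0 */
	       set_coalesce(false, 1, 50),	/* 0 */
	       set_coalesce(false, 8, 50));	/* -1 */
	return 0;
}
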
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 3382441fe7b5..86944bc3b273 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
}
-/**
-*hns_gmac_get_en - get port enable
-*@mac_drv:mac device
-*@rx:rx enable
-*@tx:tx enable
-*/
+/* hns_gmac_get_en - get port enable
+ * @mac_drv:mac device
+ * @rx:rx enable
+ * @tx:tx enable
+ */
static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -148,6 +147,17 @@ static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
GMAC_MAX_FRM_SIZE_S, newval);
}
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -250,7 +260,6 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
u32 full_duplex)
{
- u32 tx_ctrl;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
@@ -279,14 +288,6 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
return -EINVAL;
}
- tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
- dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
- dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
- dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
-
- dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
- GMAC_MODE_CHANGE_EB_B, 1);
-
return 0;
}
@@ -325,6 +326,17 @@ static void hns_gmac_init(void *mac_drv)
hns_gmac_tx_loop_pkt_dis(mac_drv);
if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
hns_gmac_set_uc_match(mac_drv, 0);
+
+ hns_gmac_config_pad_and_crc(mac_drv, 1);
+
+ dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+ GMAC_MODE_CHANGE_EB_B, 1);
+
+ /* reduce the gmac tx water line to avoid a gmac hang-up
+ * at 100M speed in half-duplex mode.
+ */
+ dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK,
+ GMAC_TX_WATER_LINE_SHIFT, 8);
}
void hns_gmac_update_stats(void *mac_drv)
@@ -453,24 +465,6 @@ static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
return 0;
}
-static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
-{
- u32 tx_ctrl;
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
- dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
- dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
- dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
-}
-
-static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- *mac_id = drv->mac_id;
-}
-
static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
{
enum hns_gmac_duplex_mdoe duplex;
@@ -672,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
static int hns_gmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_gmac_stats_string);
return 0;
@@ -712,7 +706,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
- mac_drv->mac_get_id = hns_gmac_get_id;
mac_drv->get_info = hns_gmac_get_info;
mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
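
hns_gmac_config_pad_and_crc() is a read-modify-write of the transmit-control register through dsaf_set_bit(). A standalone model of that helper; the bit positions here are illustrative, not the GMAC's actual ones:

#include <stdint.h>
#include <stdio.h>

#define TX_PAD_EN_B	1	/* illustrative bit positions */
#define TX_CRC_ADD_B	2

static void set_bit32(uint32_t *reg, unsigned int bit, int val)
{
	*reg = (*reg & ~(1u << bit)) | ((uint32_t)(!!val) << bit);
}

int main(void)
{
	uint32_t tx_ctrl = 0;			/* dsaf_read_dev(...) */

	set_bit32(&tx_ctrl, TX_PAD_EN_B, 1);
	set_bit32(&tx_ctrl, TX_CRC_ADD_B, 1);
	printf("tx_ctrl = 0x%08x\n", tx_ctrl);	/* dsaf_write_dev(...) */
	return 0;
}
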
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index bdd8cdd732fb..8b5cdf490850 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -335,44 +335,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
return 0;
}
-/**
- *hns_mac_del_mac - delete mac address into dsaf table,can't delete the same
- * address twice
- *@net_dev: net device
- *@vfn : vf lan
- *@mac : mac address
- *return status
- */
-int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
-{
- struct mac_entry_idx *old_mac;
- struct dsaf_device *dsaf_dev;
- u32 ret;
-
- dsaf_dev = mac_cb->dsaf_dev;
-
- if (vfn < DSAF_MAX_VM_NUM) {
- old_mac = &mac_cb->addr_entry_idx[vfn];
- } else {
- dev_err(mac_cb->dev,
- "vf queue is too large, %s mac%d queue = %#x!\n",
- mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
- return -EINVAL;
- }
-
- if (dsaf_dev) {
- ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
- mac_cb->mac_id, old_mac->addr);
- if (ret)
- return ret;
-
- if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
- old_mac->valid = 0;
- }
-
- return 0;
-}
-
int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
{
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
@@ -494,10 +456,9 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
}
}
-int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
- u32 buf_size = mac_cb->dsaf_dev->buf_size;
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
MAC_MAX_MTU : MAC_MAX_MTU_V2;
@@ -735,6 +696,8 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
rc = phy_device_register(phy);
if (rc) {
phy_device_free(phy);
+ dev_err(&mdio->dev, "registering phy failed at address %i\n",
+ addr);
return -ENODEV;
}
@@ -745,7 +708,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return 0;
}
-static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
{
struct acpi_reference_args args;
struct platform_device *pdev;
@@ -755,24 +718,39 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
/* Loop over the child nodes and register a phy_device for each one */
if (!to_acpi_device_node(mac_cb->fw_port))
- return;
+ return -ENODEV;
rc = acpi_node_get_property_reference(
mac_cb->fw_port, "mdio-node", 0, &args);
if (rc)
- return;
+ return rc;
addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
if (addr < 0)
- return;
+ return addr;
/* dev address in adev */
pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
+ if (!pdev) {
+ dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n",
+ mac_cb->mac_id);
+ return -EINVAL;
+ }
+
mii_bus = platform_get_drvdata(pdev);
+ if (!mii_bus) {
+ dev_err(mac_cb->dev,
+ "mac%d mdio is NULL, dsaf will probe again later\n",
+ mac_cb->mac_id);
+ return -EPROBE_DEFER;
+ }
+
rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
if (!rc)
dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
mac_cb->mac_id, addr);
+
+ return rc;
}
#define MAC_MEDIA_TYPE_MAX_LEN 16
@@ -793,7 +771,7 @@ static const struct {
*@np:device node
* return: 0 --success, negative --fail
*/
-static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
struct device_node *np;
struct regmap *syscon;
@@ -903,7 +881,15 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
}
}
} else if (is_acpi_node(mac_cb->fw_port)) {
- hns_mac_register_phy(mac_cb);
+ ret = hns_mac_register_phy(mac_cb);
+ /*
+ * The MAC works with or without a PHY. If the port is not
+ * connected to a PHY, the return value is ignored; it is only
+ * handled when a PHY exists but its MDIO bus cannot be found.
+ */
+ if (ret == -EPROBE_DEFER)
+ return ret;
} else {
dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
mac_cb->mac_id);
@@ -1065,6 +1051,7 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
dsaf_dev->mac_cb[port_id] = mac_cb;
}
}
+
/* init mac_cb for all port */
for (port_id = 0; port_id < max_port_num; port_id++) {
mac_cb = dsaf_dev->mac_cb[port_id];
@@ -1074,6 +1061,7 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
if (ret)
return ret;
+
ret = hns_mac_init_ex(mac_cb);
if (ret)
return ret;
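
The reworked hns_mac_register_phy()/hns_mac_get_info() distinguish "no PHY" (tolerated) from "MDIO bus not probed yet" (-EPROBE_DEFER, which must reach the driver core so the probe is retried). A standalone model of that contract:

#include <stdio.h>

#define EPROBE_DEFER 517	/* the kernel's errno value */

static int register_phy(int mdio_ready, int phy_present)
{
	if (!phy_present)
		return -1;		/* tolerated by the caller */
	if (!mdio_ready)
		return -EPROBE_DEFER;	/* must bubble up */
	return 0;
}

static int get_info(int mdio_ready, int phy_present)
{
	int ret = register_phy(mdio_ready, phy_present);

	if (ret == -EPROBE_DEFER)	/* only this error is fatal here */
		return ret;
	return 0;			/* the MAC works without a PHY */
}

int main(void)
{
	printf("%d %d %d\n",
	       get_info(1, 1),	/* 0 */
	       get_info(1, 0),	/* 0: no PHY, ignored */
	       get_info(0, 1));	/* -517: defer and retry later */
	return 0;
}
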
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 2bb3d1e93c64..24dfba53a0f2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -373,8 +373,6 @@ struct mac_driver {
void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
/* config rx mode for promiscuous*/
void (*set_promiscuous)(void *mac_drv, u8 enable);
- /* get mac id */
- void (*mac_get_id)(void *mac_drv, u8 *mac_id);
void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
void (*autoneg_stat)(void *mac_drv, u32 *enable);
@@ -436,7 +434,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
void hns_mac_start(struct hns_mac_cb *mac_cb);
void hns_mac_stop(struct hns_mac_cb *mac_cb);
-int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
void hns_mac_uninit(struct dsaf_device *dsaf_dev);
void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_reset(struct hns_mac_cb *mac_cb);
@@ -444,7 +441,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
-int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size);
int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
u8 *auto_neg, u16 *speed, u8 *duplex);
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 403ea9db6dbd..e0bc79ea3d88 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -510,10 +510,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
/* for no enable pfc mode */
@@ -521,10 +521,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
@@ -1648,87 +1648,6 @@ int hns_dsaf_rm_mac_addr(
mac_entry->addr);
}
-/**
- * hns_dsaf_set_mac_mc_entry - set mac mc-entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mc-mac entry
- */
-int hns_dsaf_set_mac_mc_entry(
- struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
- struct dsaf_drv_tbl_tcam_key mac_key;
- struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_drv_priv *priv =
- (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
- struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
- struct dsaf_tbl_tcam_data tcam_data;
-
- /* mac addr check */
- if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
- dsaf_dev->ae_dev.name, mac_entry->addr);
- return -EINVAL;
- }
-
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
- mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
-
- /* entry ie exist? */
- entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /*if hasnot, find enpty entry*/
- entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /*if hasnot empty, error*/
- dev_err(dsaf_dev->dev,
- "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
- dsaf_dev->ae_dev.name,
- mac_key.high.val, mac_key.low.val);
- return -EINVAL;
- }
-
- /* config hardware entry */
- memset(mac_data.tbl_mcast_port_msk,
- 0, sizeof(mac_data.tbl_mcast_port_msk));
- } else {
- /* config hardware entry */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
- &mac_data);
-
- tmp_mac_key.high.val =
- le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
- }
- mac_data.tbl_mcast_old_en = 0;
- mac_data.tbl_mcast_item_vld = 1;
- dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
- 0x3F, 0, mac_entry->port_mask[0]);
-
- dev_dbg(dsaf_dev->dev,
- "set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val, entry_index);
-
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
-
- hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, NULL,
- &mac_data);
-
- /* config software entry */
- soft_mac_entry += entry_index;
- soft_mac_entry->index = entry_index;
- soft_mac_entry->tcam_key.high.val = mac_key.high.val;
- soft_mac_entry->tcam_key.low.val = mac_key.low.val;
-
- return 0;
-}
-
static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
{
u16 *a = (u16 *)dst;
@@ -2090,166 +2009,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
return ret;
}
-/**
- * hns_dsaf_get_mac_uc_entry - get mac uc entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_single_dest_entry *mac_entry)
-{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
- struct dsaf_drv_tbl_tcam_key mac_key;
-
- struct dsaf_tbl_tcam_ucast_cfg mac_data;
- struct dsaf_tbl_tcam_data tcam_data;
-
- /* check macaddr */
- if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
- MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
- mac_entry->addr);
- return -EINVAL;
- }
-
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
-
- /*check exist? */
- entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /*find none, error */
- dev_err(dsaf_dev->dev,
- "get_uc_entry failed, %s Mac key(%#x:%#x)\n",
- dsaf_dev->ae_dev.name,
- mac_key.high.val, mac_key.low.val);
- return -EINVAL;
- }
- dev_dbg(dsaf_dev->dev,
- "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val, entry_index);
-
- /* read entry */
- hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_num = mac_data.tbl_ucast_out_port;
-
- return 0;
-}
-
-/**
- * hns_dsaf_get_mac_mc_entry - get mac mc entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
- struct dsaf_drv_tbl_tcam_key mac_key;
-
- struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_tbl_tcam_data tcam_data;
-
- /*check mac addr */
- if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
- MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
- mac_entry->addr);
- return -EINVAL;
- }
-
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
-
- /*check exist? */
- entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /* find none, error */
- dev_err(dsaf_dev->dev,
- "get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val);
- return -EINVAL;
- }
- dev_dbg(dsaf_dev->dev,
- "get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val, entry_index);
-
- /*read entry */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
- return 0;
-}
-
-/**
- * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
- * @dsaf_dev: dsa fabric device struct pointer
- * @entry_index: tab entry index
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_entry_by_index(
- struct dsaf_device *dsaf_dev,
- u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
- struct dsaf_drv_tbl_tcam_key mac_key;
-
- struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
- struct dsaf_tbl_tcam_data tcam_data;
- char mac_addr[ETH_ALEN] = {0};
-
- if (entry_index >= dsaf_dev->tcam_max_num) {
- /* find none, del error */
- dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
- dsaf_dev->ae_dev.name);
- return -EINVAL;
- }
-
- /* mc entry, do read opt */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
-
- /***get mac addr*/
- mac_addr[0] = mac_key.high.bits.mac_0;
- mac_addr[1] = mac_key.high.bits.mac_1;
- mac_addr[2] = mac_key.high.bits.mac_2;
- mac_addr[3] = mac_key.high.bits.mac_3;
- mac_addr[4] = mac_key.low.bits.mac_4;
- mac_addr[5] = mac_key.low.bits.mac_5;
- /**is mc or uc*/
- if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
- MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
- /**mc donot do*/
- } else {
- /*is not mc, just uc... */
- hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data,
- &mac_uc_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
- }
-
- return 0;
-}
-
static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
size_t sizeof_priv)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index cef6bf46ae93..4507e8222683 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -68,7 +68,7 @@ enum dsaf_roce_qos_sl {
};
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
-#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
@@ -429,23 +429,12 @@ static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry);
int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
u8 in_port_num, u8 *addr);
int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-int hns_dsaf_get_mac_entry_by_index(
- struct dsaf_device *dsaf_dev,
- u16 entry_index,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
@@ -475,5 +464,4 @@ int hns_dsaf_rm_mac_addr(
int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 6ea872287307..b62816c1574e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
int hns_ppe_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ETH_PPE_STATIC_NUM;
return 0;
}
@@ -496,21 +496,23 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
*/
int hns_ppe_init(struct dsaf_device *dsaf_dev)
{
- int i, k;
int ret;
+ int i;
for (i = 0; i < HNS_PPE_COM_NUM; i++) {
ret = hns_ppe_common_get_cfg(dsaf_dev, i);
if (ret)
- goto get_ppe_cfg_fail;
+ goto get_cfg_fail;
ret = hns_rcb_common_get_cfg(dsaf_dev, i);
if (ret)
- goto get_rcb_cfg_fail;
+ goto get_cfg_fail;
hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
- hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ ret = hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ if (ret)
+ goto get_cfg_fail;
}
for (i = 0; i < HNS_PPE_COM_NUM; i++)
@@ -518,13 +520,12 @@ int hns_ppe_init(struct dsaf_device *dsaf_dev)
return 0;
-get_rcb_cfg_fail:
- hns_ppe_common_free_cfg(dsaf_dev, i);
-get_ppe_cfg_fail:
- for (k = i - 1; k >= 0; k--) {
- hns_rcb_common_free_cfg(dsaf_dev, k);
- hns_ppe_common_free_cfg(dsaf_dev, k);
+get_cfg_fail:
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
}
+
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index f0ed80d6ef9c..6f3570cfb501 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -32,6 +32,9 @@
#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10
+/* Because the default MTU is 1500, an RCB buffer size of 2048 is enough */
+#define RCB_DEFAULT_BUFFER_SIZE 2048
+
/**
*hns_rcb_wait_fbd_clean - clean fbd
*@qs: ring struct pointer array
@@ -192,6 +195,30 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
wmb(); /* Sync point after breakpoint */
}
+/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
+/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
/**
*hns_rcb_ring_init - init rcb ring
*@ring_pair: ring pair control block
@@ -200,8 +227,6 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
struct hnae_queue *q = &ring_pair->q;
- struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
- u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
struct hnae_ring *ring =
(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
dma_addr_t dma = ring->desc_dma_addr;
@@ -212,8 +237,8 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
- bd_size_type);
+ hns_rcb_set_rx_ring_bs(q, ring->buf_size);
+
dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
@@ -224,12 +249,12 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
- bd_size_type);
+ hns_rcb_set_tx_ring_bs(q, ring->buf_size);
+
dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
- ring_pair->port_id_in_comm);
+ ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
}
}
@@ -259,13 +284,27 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
static void hns_rcb_set_port_timeout(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
- if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
timeout * HNS_RCB_CLK_FREQ_MHZ);
- else
+ } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+ if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ HNS_RCB_DEF_GAP_TIME_USECS);
+ else
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ timeout);
+
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
+ } else {
dsaf_write_dev(rcb_common,
RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
timeout);
+ }
}
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -327,8 +366,12 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
for (i = 0; i < port_num; i++) {
hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
- (void)hns_rcb_set_coalesced_frames(
- rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+ hns_rcb_set_rx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
+ !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ hns_rcb_set_tx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
hns_rcb_set_port_timeout(
rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
}
@@ -380,7 +423,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
struct hnae_ring *ring;
struct rcb_common_cb *rcb_common;
struct ring_pair_cb *ring_pair_cb;
- u32 buf_size;
u16 desc_num, mdnum_ppkt;
bool irq_idx, is_ver1;
@@ -401,7 +443,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
}
rcb_common = ring_pair_cb->rcb_common;
- buf_size = rcb_common->dsaf_dev->buf_size;
desc_num = rcb_common->dsaf_dev->desc_num;
ring->desc = NULL;
@@ -410,7 +451,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
ring->irq = ring_pair_cb->virq[irq_idx];
ring->desc_dma_addr = 0;
- ring->buf_size = buf_size;
+ ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
ring->desc_num = desc_num;
ring->max_desc_num_per_pkt = mdnum_ppkt;
ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
@@ -430,7 +471,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
-
return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
@@ -452,7 +492,7 @@ static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
*hns_rcb_get_cfg - get rcb config
*@rcb_common: rcb common device
*/
-void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
struct ring_pair_cb *ring_pair_cb;
u32 i;
@@ -477,26 +517,48 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
platform_get_irq(pdev, base_irq_idx + i * 3);
+ if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
+ (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
ring_pair_cb->q.phy_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
hns_rcb_ring_pair_get_cfg(ring_pair_cb);
}
+
+ return 0;
}
/**
- *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
*@rcb_common: rcb_common device
*@port_idx:port id in comm
*
*Returns: coalesced_frames
*/
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx)
{
return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}
/**
+ *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
+ */
+u32 hns_rcb_get_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ u64 reg;
+
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ return dsaf_read_dev(rcb_common, reg);
+}
+
+/**
*hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
*@rcb_common: rcb_common device
*@port_idx:port id in comm
@@ -538,33 +600,47 @@ int hns_rcb_set_coalesce_usecs(
return -EINVAL;
}
}
- if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+ if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
dev_err(rcb_common->dsaf_dev->dev,
- "error: coalesce_usecs setting supports 0~1023us\n");
+ "error: coalesce_usecs setting supports 1~1023us\n");
return -EINVAL;
}
+ hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ return 0;
+}
- if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
- if (timeout == 0)
- /* set timeout to 0, Disable gap time */
- dsaf_set_reg_field(rcb_common->io_base,
- RCB_INT_GAP_TIME_REG + port_idx * 4,
- PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
- 0);
- else
- /* set timeout non 0, restore gap time to 1 */
- dsaf_set_reg_field(rcb_common->io_base,
- RCB_INT_GAP_TIME_REG + port_idx * 4,
- PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
- 1);
+/**
+ *hns_rcb_set_tx_coalesced_frames - set rcb tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+ u32 old_waterline =
+ hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
+ u64 reg;
+
+ if (coalesced_frames == old_waterline)
+ return 0;
+
+ if (coalesced_frames != 1) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support tx coalesce_frames setting!\n");
+ return -EINVAL;
}
- hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ dsaf_write_dev(rcb_common, reg, coalesced_frames);
return 0;
}
/**
- *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
*@rcb_common: rcb_common device
*@port_idx:port id in comm
*@coalesced_frames:tx/rx BD num for coalesced frames
@@ -572,10 +648,11 @@ int hns_rcb_set_coalesce_usecs(
* Returns:
* Zero for success, or an error code in case of failure
*/
-int hns_rcb_set_coalesced_frames(
+int hns_rcb_set_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
- u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
+ u32 old_waterline =
+ hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);
if (coalesced_frames == old_waterline)
return 0;
@@ -799,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
*/
int hns_rcb_get_ring_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return HNS_RING_STATIC_REG_NUM;
return 0;
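
The tx/rx split above reuses one register bank: rx waterlines sit at RCB_CFG_PKTLINE_REG + port * 4 and tx waterlines at the same base shifted by HNS_RCB_TX_PKTLINE_OFFSET (8) ports. A standalone sketch of the addressing; the base value here is a placeholder, not the actual register offset:

#include <stdio.h>

#define RCB_CFG_PKTLINE_REG	  0x9200u	/* placeholder base */
#define HNS_RCB_TX_PKTLINE_OFFSET 8

int main(void)
{
	unsigned int port = 2;

	printf("rx waterline reg: 0x%x\n",
	       RCB_CFG_PKTLINE_REG + port * 4);
	printf("tx waterline reg: 0x%x\n",
	       RCB_CFG_PKTLINE_REG + (port + HNS_RCB_TX_PKTLINE_OFFSET) * 4);
	return 0;
}
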
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 99b4e1ba0a94..602816498c8d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -35,12 +35,23 @@ struct rcb_common_cb;
#define HNS_RCB_REG_OFFSET 0x10000
+#define HNS_RCB_TX_FRAMES_LOW 1
+#define HNS_RCB_RX_FRAMES_LOW 1
+#define HNS_RCB_TX_FRAMES_HIGH 1023
+#define HNS_RCB_RX_FRAMES_HIGH 1023
+#define HNS_RCB_TX_USECS_LOW 1
+#define HNS_RCB_RX_USECS_LOW 1
+#define HNS_RCB_TX_USECS_HIGH 1023
+#define HNS_RCB_RX_USECS_HIGH 1023
#define HNS_RCB_MAX_COALESCED_FRAMES 1023
#define HNS_RCB_MIN_COALESCED_FRAMES 1
-#define HNS_RCB_DEF_COALESCED_FRAMES 50
+#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50
+#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
-#define HNS_RCB_DEF_COALESCED_USECS 50
+#define HNS_RCB_DEF_COALESCED_USECS 30
+#define HNS_RCB_DEF_GAP_TIME_USECS 20
+#define HNS_RCB_TX_PKTLINE_OFFSET 8
#define HNS_RCB_COMMON_ENDIAN 1
@@ -110,7 +121,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_start(struct hnae_queue *q, u32 val);
-void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
@@ -125,13 +136,17 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx);
int hns_rcb_set_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
-int hns_rcb_set_coalesced_frames(
+int hns_rcb_set_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+int hns_rcb_set_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
void hns_rcb_update_stats(struct hnae_queue *queue);
@@ -146,4 +161,7 @@ int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
+
#endif /* _HNS_DSAF_RCB_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 8fa18fc17cd2..46a52d9bb196 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -421,7 +421,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
-#define RCB_INT_GAP_TIME_REG 0x9400
+#define RCB_PORT_INT_GAPTIME_REG 0x9400
#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
@@ -466,6 +466,7 @@
#define GMAC_DUPLEX_TYPE_REG 0x0008UL
#define GMAC_FD_FC_TYPE_REG 0x000CUL
+#define GMAC_TX_WATER_LINE_REG 0x0010UL
#define GMAC_FC_TX_TIMER_REG 0x001CUL
#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
@@ -912,6 +913,9 @@
#define GMAC_DUPLEX_TYPE_B 0
+#define GMAC_TX_WATER_LINE_MASK ((1UL << 8) - 1)
+#define GMAC_TX_WATER_LINE_SHIFT 0
+
#define GMAC_FC_TX_TIMER_S 0
#define GMAC_FC_TX_TIMER_M 0xffff
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index aae830a93050..51e7e9f5af49 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -300,18 +300,6 @@ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
}
/**
- *hns_xgmac_get_id - get xgmac port id
- *@mac_drv: mac driver
- *@newval:xgmac max frame length
- */
-static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- *mac_id = drv->mac_id;
-}
-
-/**
*hns_xgmac_config_max_frame_length - set xgmac max frame length
*@mac_drv: mac driver
*@newval:xgmac max frame length
@@ -793,7 +781,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
*/
static int hns_xgmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_xgmac_stats_string);
return 0;
@@ -833,7 +821,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->config_half_duplex = NULL;
mac_drv->set_rx_ignore_pause_frames =
hns_xgmac_set_rx_ignore_pause_frames;
- mac_drv->mac_get_id = hns_xgmac_get_id;
mac_drv->mac_free = hns_xgmac_free;
mac_drv->adjust_link = NULL;
mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index fca37e2c7f01..c6700b91a2df 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -512,7 +512,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
int last_offset;
bool twobufs;
- twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
+ twobufs = ((PAGE_SIZE < 8192) &&
+ hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
@@ -859,7 +860,7 @@ out:
return recv_pkts;
}
-static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int num = 0;
@@ -873,22 +874,23 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
- napi_schedule(&ring_data->napi);
+ return false;
+ } else {
+ return true;
}
}
-static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int num = 0;
+ int num;
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
- if (num == 0)
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring, 0);
+ if (!num)
+ return true;
else
- napi_schedule(&ring_data->napi);
+ return false;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
@@ -921,12 +923,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
-#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
-#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
+#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
-#define NETIF_TX_LOCK(ndev)
-#define NETIF_TX_UNLOCK(ndev)
+#define NETIF_TX_LOCK(ring)
+#define NETIF_TX_UNLOCK(ring)
#endif
+
/* reclaim all desc in one budget
* return error or number of desc left
*/
@@ -940,13 +943,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int head;
int bytes, pkts;
- NETIF_TX_LOCK(ndev);
+ NETIF_TX_LOCK(ring);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean) {
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
return 0; /* no data to poll */
}
@@ -954,7 +957,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++;
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
return -EIO;
}
@@ -966,7 +969,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
@@ -989,7 +992,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
return 0;
}
-static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head;
@@ -1002,20 +1005,21 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
- napi_schedule(&ring_data->napi);
+ return false;
+ } else {
+ return true;
}
}
-static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head == ring->next_to_clean)
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring, 0);
+ return true;
else
- napi_schedule(&ring_data->napi);
+ return false;
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
@@ -1026,7 +1030,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
int head;
int bytes, pkts;
- NETIF_TX_LOCK(ndev);
+ NETIF_TX_LOCK(ring);
head = ring->next_to_use; /* ntu: software-set ring position */
bytes = 0;
@@ -1034,7 +1038,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue);
@@ -1042,15 +1046,23 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
+ int clean_complete = 0;
struct hns_nic_ring_data *ring_data =
container_of(napi, struct hns_nic_ring_data, napi);
- int clean_complete = ring_data->poll_one(
- ring_data, budget, ring_data->ex_process);
+ struct hnae_ring *ring = ring_data->ring;
- if (clean_complete >= 0 && clean_complete < budget) {
- napi_complete(napi);
- ring_data->fini_process(ring_data);
- return 0;
+try_again:
+ clean_complete += ring_data->poll_one(
+ ring_data, budget - clean_complete,
+ ring_data->ex_process);
+
+ if (clean_complete < budget) {
+ if (ring_data->fini_process(ring_data)) {
+ napi_complete(napi);
+ ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+ } else {
+ goto try_again;
+ }
}
return clean_complete;
@@ -1196,54 +1208,31 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx)
napi_disable(&priv->ring_data[idx].napi);
}
-static void hns_set_irq_affinity(struct hns_nic_priv *priv)
+static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+ struct hnae_ring *ring, cpumask_t *mask)
{
- struct hnae_handle *h = priv->ae_handle;
- struct hns_nic_ring_data *rd;
- int i;
int cpu;
- cpumask_var_t mask;
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return;
- /*diffrent irq banlance for 16core and 32core*/
- if (h->q_num == num_possible_cpus()) {
- for (i = 0; i < h->q_num * 2; i++) {
- rd = &priv->ring_data[i];
- if (cpu_online(rd->queue_index)) {
- cpumask_clear(mask);
- cpu = rd->queue_index;
- cpumask_set_cpu(cpu, mask);
- (void)irq_set_affinity_hint(rd->ring->irq,
- mask);
- }
- }
+	/* Different irq balance between 16 core and 32 core.
+	 * The cpu mask is set by ring index according to the ring flag,
+	 * which indicates whether the ring is tx or rx.
+	 */
+ if (q_num == num_possible_cpus()) {
+ if (is_tx_ring(ring))
+ cpu = ring_idx;
+ else
+ cpu = ring_idx - q_num;
} else {
- for (i = 0; i < h->q_num; i++) {
- rd = &priv->ring_data[i];
- if (cpu_online(rd->queue_index * 2)) {
- cpumask_clear(mask);
- cpu = rd->queue_index * 2;
- cpumask_set_cpu(cpu, mask);
- (void)irq_set_affinity_hint(rd->ring->irq,
- mask);
- }
- }
-
- for (i = h->q_num; i < h->q_num * 2; i++) {
- rd = &priv->ring_data[i];
- if (cpu_online(rd->queue_index * 2 + 1)) {
- cpumask_clear(mask);
- cpu = rd->queue_index * 2 + 1;
- cpumask_set_cpu(cpu, mask);
- (void)irq_set_affinity_hint(rd->ring->irq,
- mask);
- }
- }
+ if (is_tx_ring(ring))
+ cpu = ring_idx * 2;
+ else
+ cpu = (ring_idx - q_num) * 2 + 1;
}
- free_cpumask_var(mask);
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+
+ return cpu;
}
static int hns_nic_init_irq(struct hns_nic_priv *priv)
@@ -1252,6 +1241,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
struct hns_nic_ring_data *rd;
int i;
int ret;
+ int cpu;
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
@@ -1261,7 +1251,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
"%s-%s%d", priv->netdev->name,
- (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+ (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
@@ -1273,12 +1263,17 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
return ret;
}
disable_irq(rd->ring->irq);
+
+ cpu = hns_nic_init_affinity_mask(h->q_num, i,
+ rd->ring, &rd->mask);
+
+ if (cpu_online(cpu))
+ irq_set_affinity_hint(rd->ring->irq,
+ &rd->mask);
+
rd->ring->irq_init_flag = RCB_IRQ_INITED;
}
- /*set cpu affinity*/
- hns_set_irq_affinity(priv);
-
return 0;
}
@@ -1487,32 +1482,259 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
return (netdev_tx_t)ret;
}
+static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+#define HNS_LB_TX_RING 0
+static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethhdr;
+ int frame_len;
+
+ /* allocate test skb */
+ skb = alloc_skb(64, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, 64);
+ skb->dev = ndev;
+ memset(skb->data, 0xFF, skb->len);
+
+	/* must be a tcp/ip packet */
+ ethhdr = (struct ethhdr *)skb->data;
+ ethhdr->h_proto = htons(ETH_P_IP);
+
+ frame_len = skb->len & (~1ul);
+ memset(&skb->data[frame_len / 2], 0xAA,
+ frame_len / 2 - 1);
+
+ skb->queue_mapping = HNS_LB_TX_RING;
+
+ return skb;
+}
+
+static int hns_enable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ int speed, duplex;
+ int ret;
+
+ ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
+ if (ret)
+ return ret;
+
+ ret = ops->start ? ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+	/* adjust link speed and duplex */
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ ops->adjust_link(h, speed, duplex);
+
+	/* wait for the h/w to become ready */
+ mdelay(300);
+
+ return 0;
+}
+
+static void hns_disable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ ops->stop(h);
+ ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
+}
+
+/**
+ *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched.
+ *The function works as follows:
+ * 1. if an rx ring has a page_offset that is not 0 between head and tail,
+ *    the chip has fetched wrong descriptors for a ring whose buffer size
+ *    is 4096.
+ * 2. enable the chip serdes loopback and point the rss indirection table
+ *    at that ring.
+ * 3. construct 64-byte ip broadcast packets and wait until the associated
+ *    rx ring has received them all, so the chip fetches fresh descriptors.
+ * 4. restore the original state.
+ *
+ *@ndev: net device
+ */
+static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ struct hns_nic_ring_data *rd;
+ struct hnae_ring *ring;
+ struct sk_buff *skb;
+ u32 *org_indir;
+ u32 *cur_indir;
+ int indir_size;
+ int head, tail;
+ int fetch_num;
+ int i, j;
+ bool found;
+ int retry_times;
+ int ret = 0;
+
+ /* alloc indir memory */
+ indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
+ org_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!org_indir)
+ return -ENOMEM;
+
+	/* store the original indirection table */
+ ops->get_rss(h, org_indir, NULL, NULL);
+
+ cur_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!cur_indir) {
+ ret = -ENOMEM;
+ goto cur_indir_alloc_err;
+ }
+
+ /* set loopback */
+ if (hns_enable_serdes_lb(ndev)) {
+ ret = -EINVAL;
+ goto enable_serdes_lb_err;
+ }
+
+	/* walk each rx ring and clear its fetched descriptors */
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
+ found = false;
+ fetch_num = ring_dist(ring, head, tail);
+
+ while (head != tail) {
+ if (ring->desc_cb[head].page_offset != 0) {
+ found = true;
+ break;
+ }
+
+ head++;
+ if (head == ring->desc_num)
+ head = 0;
+ }
+
+ if (found) {
+ for (j = 0; j < indir_size / sizeof(*org_indir); j++)
+ cur_indir[j] = i;
+ ops->set_rss(h, cur_indir, NULL, 0);
+
+ for (j = 0; j < fetch_num; j++) {
+ /* alloc one skb and init */
+ skb = hns_assemble_skb(ndev);
+ if (!skb)
+ goto out;
+ rd = &tx_ring_data(priv, skb->queue_mapping);
+ hns_nic_net_xmit_hw(ndev, skb, rd);
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+ /* clean rx */
+ rd = &rx_ring_data(priv, i);
+ if (rd->poll_one(rd, fetch_num,
+ hns_nic_drop_rx_fetch))
+ break;
+ }
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+				/* reclaim the packet sent on tx ring 0 */
+ rd = &tx_ring_data(priv,
+ HNS_LB_TX_RING);
+ if (rd->poll_one(rd, fetch_num, NULL))
+ break;
+ }
+ }
+ }
+ }
+
+out:
+ /* restore everything */
+ ops->set_rss(h, org_indir, NULL, 0);
+ hns_disable_serdes_lb(ndev);
+enable_serdes_lb_err:
+ kfree(cur_indir);
+cur_indir_alloc_err:
+ kfree(org_indir);
+
+ return ret;
+}
+
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
+ bool if_running = netif_running(ndev);
int ret;
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ /* MTU no change */
+ if (new_mtu == ndev->mtu)
+ return 0;
+
if (!h->dev->ops->set_mtu)
return -ENOTSUPP;
- if (netif_running(ndev)) {
+ if (if_running) {
(void)hns_nic_net_stop(ndev);
msleep(100);
+ }
- ret = h->dev->ops->set_mtu(h, new_mtu);
- if (ret)
- netdev_err(ndev, "set mtu fail, return value %d\n",
- ret);
+ if (priv->enet_ver != AE_VERSION_1 &&
+ ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
+ new_mtu > BD_SIZE_2048_MAX_MTU) {
+		/* reinitialize the ring descriptors */
+ hnae_reinit_all_ring_desc(h);
- if (hns_nic_net_open(ndev))
- netdev_err(ndev, "hns net open fail\n");
- } else {
- ret = h->dev->ops->set_mtu(h, new_mtu);
+		/* clear the descriptors the chip has already fetched */
+ ret = hns_nic_clear_all_rx_fetch(ndev);
+
+		/* the page offset must be consistent with the descriptors */
+ hnae_reinit_all_ring_page_off(h);
+
+ if (ret) {
+ netdev_err(ndev, "clear the fetched desc fail\n");
+ goto out;
+ }
+ }
+
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ if (ret) {
+ netdev_err(ndev, "set mtu fail, return value %d\n",
+ ret);
+ goto out;
}
- if (!ret)
- ndev->mtu = new_mtu;
+	/* finally, record the new mtu in the netdevice */
+ ndev->mtu = new_mtu;
+
+out:
+ if (if_running) {
+ if (hns_nic_net_open(ndev)) {
+ netdev_err(ndev, "hns net open fail\n");
+ ret = -EINVAL;
+ }
+ }
return ret;
}
@@ -1791,7 +2013,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
-
+	/* order prior state updates before clearing the service-sched bit */
smp_mb__before_atomic();
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
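The hns_enet.c rework above changes fini_process to return a bool meaning
"hardware ring verified empty" and moves napi_complete() plus the IRQ
re-enable into the common poll routine, retrying when new work raced in.
A condensed, driver-agnostic sketch of that pattern (all example_* names
are hypothetical):

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *r = container_of(napi, struct example_ring, napi);
	int done = 0;

	do {
		/* like poll_one(): process up to the remaining budget */
		done += example_clean(r, budget - done);
		if (done >= budget)
			return done;	/* budget exhausted, IRQ stays masked */
	} while (!example_ring_empty(r));	/* like fini_process() */

	/* only complete once the hardware ring is confirmed empty */
	napi_complete(napi);
	example_enable_irq(r);
	return done;
}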
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 5b412de350aa..1b83232082b2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -37,10 +37,11 @@ enum hns_nic_state {
struct hns_nic_ring_data {
struct hnae_ring *ring;
struct napi_struct napi;
+ cpumask_t mask; /* affinity mask */
int queue_index;
int (*poll_one)(struct hns_nic_ring_data *, int, void *);
void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
- void (*fini_process)(struct hns_nic_ring_data *);
+ bool (*fini_process)(struct hns_nic_ring_data *);
};
/* compatible the difference between two versions */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3ac2183dbd21..b8fab149690f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -146,7 +146,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
/* When there is no phy, autoneg is off. */
cmd->base.autoneg = false;
- cmd->base.cmd = speed;
+ cmd->base.speed = speed;
cmd->base.duplex = duplex;
if (net_dev->phydev)
@@ -764,14 +764,14 @@ static int hns_get_coalesce(struct net_device *net_dev,
ec->use_adaptive_tx_coalesce = 1;
if ((!ops->get_coalesce_usecs) ||
- (!ops->get_rx_max_coalesced_frames))
+ (!ops->get_max_coalesced_frames))
return -ESRCH;
ops->get_coalesce_usecs(priv->ae_handle,
&ec->tx_coalesce_usecs,
&ec->rx_coalesce_usecs);
- ops->get_rx_max_coalesced_frames(
+ ops->get_max_coalesced_frames(
priv->ae_handle,
&ec->tx_max_coalesced_frames,
&ec->rx_max_coalesced_frames);
@@ -801,30 +801,28 @@ static int hns_set_coalesce(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- int ret;
+ int rc1, rc2;
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
return -EINVAL;
- if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
- return -EINVAL;
-
if ((!ops->set_coalesce_usecs) ||
(!ops->set_coalesce_frames))
return -ESRCH;
- ret = ops->set_coalesce_usecs(priv->ae_handle,
+ rc1 = ops->set_coalesce_usecs(priv->ae_handle,
ec->rx_coalesce_usecs);
- if (ret)
- return ret;
- ret = ops->set_coalesce_frames(
- priv->ae_handle,
- ec->rx_max_coalesced_frames);
+ rc2 = ops->set_coalesce_frames(priv->ae_handle,
+ ec->tx_max_coalesced_frames,
+ ec->rx_max_coalesced_frames);
- return ret;
+ if (rc1 || rc2)
+ return -EINVAL;
+
+ return 0;
}
/**
@@ -1253,12 +1251,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
- /* currently hfunc can only be Toeplitz hash */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
- if (!indir)
- return 0;
+ }
return ops->set_rss(priv->ae_handle, indir, key, hfunc);
}
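The hns_set_rss() change above stops rejecting a non-NULL key outright and
validates only the hash function. A minimal sketch of the same validation in
a generic .set_rxfh handler, where example_apply_rss() is a hypothetical
stand-in for ops->set_rss():

static int example_set_rxfh(struct net_device *ndev, const u32 *indir,
			    const u8 *key, const u8 hfunc)
{
	/* only Toeplitz (or "no change") is supported by the hardware */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
		netdev_err(ndev, "Invalid hfunc!\n");
		return -EOPNOTSUPP;
	}

	/* key and indir are passed through; the backend is assumed to
	 * treat a NULL pointer as "leave unchanged"
	 */
	return example_apply_rss(ndev, indir, key, hfunc);
}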
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 501eb2090ca6..e5221d95afe1 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -23,17 +23,9 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/spinlock_types.h>
#define MDIO_DRV_NAME "Hi-HNS_MDIO"
#define MDIO_BUS_NAME "Hisilicon MII Bus"
-#define MDIO_DRV_VERSION "1.3.0"
-#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
-#define MDIO_DRV_STRING MDIO_BUS_NAME
-#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
-
-#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
-#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_TIMEOUT 1000000
@@ -64,9 +56,7 @@ struct hns_mdio_device {
#define MDIO_CMD_DEVAD_S 0
#define MDIO_CMD_PRTAD_M 0x1f
#define MDIO_CMD_PRTAD_S 5
-#define MDIO_CMD_OP_M 0x3
#define MDIO_CMD_OP_S 10
-#define MDIO_CMD_ST_M 0x3
#define MDIO_CMD_ST_S 12
#define MDIO_CMD_START_B 14
@@ -185,18 +175,20 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
static int hns_mdio_wait_ready(struct mii_bus *bus)
{
struct hns_mdio_device *mdio_dev = bus->priv;
+ u32 cmd_reg_value;
int i;
- u32 cmd_reg_value = 1;
/* wait for MDIO_COMMAND_REG's mdio_start == 0 */
/* only after that can we read or write */
- for (i = 0; cmd_reg_value; i++) {
+ for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
MDIO_COMMAND_REG,
MDIO_CMD_START_B);
- if (i == MDIO_TIMEOUT)
- return -ETIMEDOUT;
+ if (!cmd_reg_value)
+ break;
}
+ if ((i == MDIO_TIMEOUT) && cmd_reg_value)
+ return -ETIMEDOUT;
return 0;
}
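The hns_mdio_wait_ready() fix above bounds the busy-wait by MDIO_TIMEOUT and
reports -ETIMEDOUT only if the device is still busy after the final read.
The same idiom in isolation, with read_busy_bit() as a hypothetical stand-in
for the MDIO_GET_REG_BIT() call:

static int example_wait_ready(void)
{
	int i;

	for (i = 0; i < MDIO_TIMEOUT; i++) {
		/* read_busy_bit() stands in for MDIO_GET_REG_BIT() */
		if (!read_busy_bit())
			return 0;	/* ready within the budget */
	}

	return -ETIMEDOUT;	/* still busy after the last read */
}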
diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile
index eba21835d90d..98768ba0955a 100644
--- a/drivers/net/ethernet/ibm/emac/Makefile
+++ b/drivers/net/ethernet/ibm/emac/Makefile
@@ -8,4 +8,3 @@ ibm_emac-y := mal.o core.o phy.o
ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o
-ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index c44036d5761a..508923f39ccf 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1929,7 +1929,7 @@ static struct net_device_stats *emac_stats(struct net_device *ndev)
struct emac_instance *dev = netdev_priv(ndev);
struct emac_stats *st = &dev->stats;
struct emac_error_stats *est = &dev->estats;
- struct net_device_stats *nst = &dev->nstats;
+ struct net_device_stats *nst = &ndev->stats;
unsigned long flags;
DBG2(dev, "stats" NL);
@@ -3173,8 +3173,6 @@ static int emac_probe(struct platform_device *ofdev)
printk("%s: found %s PHY (0x%02x)\n", ndev->name,
dev->phy.def->name, dev->phy.address);
- emac_dbg_register(dev);
-
/* Life is good */
return 0;
@@ -3243,7 +3241,6 @@ static int emac_remove(struct platform_device *ofdev)
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
- emac_dbg_unregister(dev);
iounmap(dev->emacp);
if (dev->wol_irq)
@@ -3326,9 +3323,6 @@ static int __init emac_init(void)
printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
- /* Init debug stuff */
- emac_init_debug();
-
/* Build EMAC boot list */
emac_make_bootlist();
@@ -3373,7 +3367,6 @@ static void __exit emac_exit(void)
rgmii_exit();
zmii_exit();
mal_exit();
- emac_fini_debug();
/* Destroy EMAC boot list */
for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 0710a6685489..f10e156641d5 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -265,7 +265,6 @@ struct emac_instance {
/* Stats
*/
struct emac_error_stats estats;
- struct net_device_stats nstats;
struct emac_stats stats;
/* Misc
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
deleted file mode 100644
index a559f326bf63..000000000000
--- a/drivers/net/ethernet/ibm/emac/debug.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * drivers/net/ethernet/ibm/emac/debug.c
- *
- * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * Based on the arch/ppc version of the driver:
- *
- * Copyright (c) 2004, 2005 Zultys Technologies
- * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sysrq.h>
-#include <asm/io.h>
-
-#include "core.h"
-
-static DEFINE_SPINLOCK(emac_dbg_lock);
-
-static void emac_desc_dump(struct emac_instance *p)
-{
- int i;
- printk("** EMAC %s TX BDs **\n"
- " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
- p->ofdev->dev.of_node->full_name,
- p->tx_cnt, p->tx_slot, p->ack_slot);
- for (i = 0; i < NUM_TX_BUFF / 2; ++i)
- printk
- ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
- i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
- p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
- NUM_TX_BUFF / 2 + i,
- p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
- p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
- p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
- p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
-
- printk("** EMAC %s RX BDs **\n"
- " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
- " rx_sg_skb = 0x%p\n",
- p->ofdev->dev.of_node->full_name,
- p->rx_slot, p->commac.flags, p->rx_skb_size,
- p->rx_sync_size, p->rx_sg_skb);
- for (i = 0; i < NUM_RX_BUFF / 2; ++i)
- printk
- ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
- i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
- p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
- NUM_RX_BUFF / 2 + i,
- p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
- p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
- p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
- p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
-}
-
-static void emac_mac_dump(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- const int xaht_regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
- u32 *iaht_base = emac_iaht_base(dev);
- int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
- int n;
-
- printk("** EMAC %s registers **\n"
- "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
- "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
- "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
- dev->ofdev->dev.of_node->full_name,
- in_be32(&p->mr0), in_be32(&p->mr1),
- in_be32(&p->tmr0), in_be32(&p->tmr1),
- in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
- in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
- in_be32(&p->vtci)
- );
-
- if (emac4sync)
- printk("MAR = %04x%08x MMAR = %04x%08x\n",
- in_be32(&p->u0.emac4sync.mahr),
- in_be32(&p->u0.emac4sync.malr),
- in_be32(&p->u0.emac4sync.mmahr),
- in_be32(&p->u0.emac4sync.mmalr)
- );
-
- for (n = 0; n < xaht_regs; n++)
- printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
-
- for (n = 0; n < xaht_regs; n++)
- printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
-
- printk("LSA = %04x%08x IPGVR = 0x%04x\n"
- "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
- "OCTX = 0x%08x OCRX = 0x%08x\n",
- in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
- in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
- in_be32(&p->octx), in_be32(&p->ocrx)
- );
-
- if (!emac4sync) {
- printk("IPCR = 0x%08x\n",
- in_be32(&p->u1.emac4.ipcr)
- );
- } else {
- printk("REVID = 0x%08x TPC = 0x%08x\n",
- in_be32(&p->u1.emac4sync.revid),
- in_be32(&p->u1.emac4sync.tpc)
- );
- }
-
- emac_desc_dump(dev);
-}
-
-static void emac_mal_dump(struct mal_instance *mal)
-{
- int i;
-
- printk("** MAL %s Registers **\n"
- "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
- "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
- "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
- mal->ofdev->dev.of_node->full_name,
- get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
- get_mal_dcrn(mal, MAL_IER),
- get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
- get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
- get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
- get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
- );
-
- printk("TX|");
- for (i = 0; i < mal->num_tx_chans; ++i) {
- if (i && !(i % 4))
- printk("\n ");
- printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
- }
- printk("\nRX|");
- for (i = 0; i < mal->num_rx_chans; ++i) {
- if (i && !(i % 4))
- printk("\n ");
- printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
- }
- printk("\n ");
- for (i = 0; i < mal->num_rx_chans; ++i) {
- u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
- if (i && !(i % 3))
- printk("\n ");
- printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
- }
- printk("\n");
-}
-
-static struct emac_instance *__emacs[4];
-static struct mal_instance *__mals[1];
-
-void emac_dbg_register(struct emac_instance *dev)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__emacs); i++)
- if (__emacs[i] == NULL) {
- __emacs[i] = dev;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void emac_dbg_unregister(struct emac_instance *dev)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__emacs); i++)
- if (__emacs[i] == dev) {
- __emacs[i] = NULL;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void mal_dbg_register(struct mal_instance *mal)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__mals); i++)
- if (__mals[i] == NULL) {
- __mals[i] = mal;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void mal_dbg_unregister(struct mal_instance *mal)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__mals); i++)
- if (__mals[i] == mal) {
- __mals[i] = NULL;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void emac_dbg_dump_all(void)
-{
- unsigned int i;
- unsigned long flags;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
-
- for (i = 0; i < ARRAY_SIZE(__mals); ++i)
- if (__mals[i])
- emac_mal_dump(__mals[i]);
-
- for (i = 0; i < ARRAY_SIZE(__emacs); ++i)
- if (__emacs[i])
- emac_mac_dump(__emacs[i]);
-
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key)
-{
- emac_dbg_dump_all();
-}
-
-static struct sysrq_key_op emac_sysrq_op = {
- .handler = emac_sysrq_handler,
- .help_msg = "emac(c)",
- .action_msg = "Show EMAC(s) status",
-};
-
-int __init emac_init_debug(void)
-{
- return register_sysrq_key('c', &emac_sysrq_op);
-}
-
-void __exit emac_fini_debug(void)
-{
- unregister_sysrq_key('c', &emac_sysrq_op);
-}
-
-#else
-int __init emac_init_debug(void)
-{
- return 0;
-}
-void __exit emac_fini_debug(void)
-{
-}
-#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 9c45efe4c8fe..5bdfc174a07e 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -25,32 +25,9 @@
#include "core.h"
#if defined(CONFIG_IBM_EMAC_DEBUG)
-
-struct emac_instance;
-struct mal_instance;
-
-void emac_dbg_register(struct emac_instance *dev);
-void emac_dbg_unregister(struct emac_instance *dev);
-void mal_dbg_register(struct mal_instance *mal);
-void mal_dbg_unregister(struct mal_instance *mal);
-int emac_init_debug(void) __init;
-void emac_fini_debug(void) __exit;
-void emac_dbg_dump_all(void);
-
# define DBG_LEVEL 1
-
#else
-
-# define emac_dbg_register(x) do { } while(0)
-# define emac_dbg_unregister(x) do { } while(0)
-# define mal_dbg_register(x) do { } while(0)
-# define mal_dbg_unregister(x) do { } while(0)
-# define emac_init_debug() do { } while(0)
-# define emac_fini_debug() do { } while(0)
-# define emac_dbg_dump_all() do { } while(0)
-
# define DBG_LEVEL 0
-
#endif
#define EMAC_DBG(d, name, fmt, arg...) \
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index cd3227b088b7..91b1a558f37d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -695,8 +695,6 @@ static int mal_probe(struct platform_device *ofdev)
wmb();
platform_set_drvdata(ofdev, mal);
- mal_dbg_register(mal);
-
return 0;
fail6:
@@ -740,8 +738,6 @@ static int mal_remove(struct platform_device *ofdev)
mal_reset(mal);
- mal_dbg_unregister(mal);
-
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 7acda04d034e..ed8780cca982 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -146,7 +146,6 @@ struct ibmveth_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
struct napi_struct napi;
- struct net_device_stats stats;
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b23d6545f835..4fcd2f0378ba 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -65,7 +65,6 @@
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
-#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
@@ -75,6 +74,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
#include "ibmvnic.h"
@@ -89,7 +89,6 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
-static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -110,6 +109,11 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
+static void send_login(struct ibmvnic_adapter *adapter);
+static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
+static int ibmvnic_init(struct ibmvnic_adapter *);
+static void release_crq_queue(struct ibmvnic_adapter *);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -159,21 +163,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
-/* net_device_ops functions */
-
-static void init_rx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rx_pool *rx_pool, int num, int index,
- int buff_size, int active)
-{
- netdev_dbg(adapter->netdev,
- "Initializing rx_pool %d, %d buffs, %d bytes each\n",
- index, num, buff_size);
- rx_pool->size = num;
- rx_pool->index = index;
- rx_pool->buff_size = buff_size;
- rx_pool->active = active;
-}
-
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -202,45 +191,12 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ if (!ltb->buff)
+ return;
+
if (!adapter->failover)
send_request_unmap(adapter, ltb->map_id);
-}
-
-static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rx_pool *pool)
-{
- struct device *dev = &adapter->vdev->dev;
- int i;
-
- pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
- if (!pool->free_map)
- return -ENOMEM;
-
- pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
- GFP_KERNEL);
-
- if (!pool->rx_buff) {
- dev_err(dev, "Couldn't alloc rx buffers\n");
- kfree(pool->free_map);
- return -ENOMEM;
- }
-
- if (alloc_long_term_buff(adapter, &pool->long_term_buff,
- pool->size * pool->buff_size)) {
- kfree(pool->free_map);
- kfree(pool->rx_buff);
- return -ENOMEM;
- }
-
- for (i = 0; i < pool->size; ++i)
- pool->free_map[i] = i;
-
- atomic_set(&pool->available, 0);
- pool->next_alloc = 0;
- pool->next_free = 0;
-
- return 0;
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
@@ -347,93 +303,195 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
}
}
-static void free_rx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rx_pool *pool)
+static void release_stats_token(struct ibmvnic_adapter *adapter)
{
- int i;
+ struct device *dev = &adapter->vdev->dev;
+
+ if (!adapter->stats_token)
+ return;
+
+ dma_unmap_single(dev, adapter->stats_token,
+ sizeof(struct ibmvnic_statistics),
+ DMA_FROM_DEVICE);
+ adapter->stats_token = 0;
+}
+
+static int init_stats_token(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ dma_addr_t stok;
+
+ stok = dma_map_single(dev, &adapter->stats,
+ sizeof(struct ibmvnic_statistics),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, stok)) {
+ dev_err(dev, "Couldn't map stats buffer\n");
+ return -1;
+ }
+
+ adapter->stats_token = stok;
+ return 0;
+}
- kfree(pool->free_map);
- pool->free_map = NULL;
+static void release_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rx_pool *rx_pool;
+ int rx_scrqs;
+ int i, j;
- if (!pool->rx_buff)
+ if (!adapter->rx_pool)
return;
- for (i = 0; i < pool->size; i++) {
- if (pool->rx_buff[i].skb) {
- dev_kfree_skb_any(pool->rx_buff[i].skb);
- pool->rx_buff[i].skb = NULL;
+ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ for (i = 0; i < rx_scrqs; i++) {
+ rx_pool = &adapter->rx_pool[i];
+
+ kfree(rx_pool->free_map);
+ free_long_term_buff(adapter, &rx_pool->long_term_buff);
+
+ if (!rx_pool->rx_buff)
+ continue;
+
+ for (j = 0; j < rx_pool->size; j++) {
+ if (rx_pool->rx_buff[j].skb) {
+				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+				rx_pool->rx_buff[j].skb = NULL;
+ }
}
+
+ kfree(rx_pool->rx_buff);
}
- kfree(pool->rx_buff);
- pool->rx_buff = NULL;
+
+ kfree(adapter->rx_pool);
+ adapter->rx_pool = NULL;
}
-static int ibmvnic_open(struct net_device *netdev)
+static int init_rx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_tx_pool *tx_pool;
- union ibmvnic_crq crq;
+ struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
u64 *size_array;
- int tx_subcrqs;
int i, j;
rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- tx_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_buff_size));
- adapter->map_id = 1;
- adapter->napi = kcalloc(adapter->req_rx_queues,
- sizeof(struct napi_struct), GFP_KERNEL);
- if (!adapter->napi)
- goto alloc_napi_failed;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
- NAPI_POLL_WEIGHT);
- napi_enable(&adapter->napi[i]);
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+
+ adapter->rx_pool = kcalloc(rxadd_subcrqs,
+ sizeof(struct ibmvnic_rx_pool),
+ GFP_KERNEL);
+ if (!adapter->rx_pool) {
+ dev_err(dev, "Failed to allocate rx pools\n");
+ return -1;
}
- adapter->rx_pool =
- kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
- if (!adapter->rx_pool)
- goto rx_pool_arr_alloc_failed;
- send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
- init_rx_pool(adapter, &adapter->rx_pool[i],
- adapter->req_rx_add_entries_per_subcrq, i,
- be64_to_cpu(size_array[i]), 1);
- if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
- dev_err(dev, "Couldn't alloc rx pool\n");
- goto rx_pool_alloc_failed;
+ rx_pool = &adapter->rx_pool[i];
+
+ netdev_dbg(adapter->netdev,
+ "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
+ i, adapter->req_rx_add_entries_per_subcrq,
+ be64_to_cpu(size_array[i]));
+
+ rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+ rx_pool->index = i;
+ rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->active = 1;
+
+ rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
+ GFP_KERNEL);
+ if (!rx_pool->free_map) {
+ release_rx_pools(adapter);
+ return -1;
+ }
+
+ rx_pool->rx_buff = kcalloc(rx_pool->size,
+ sizeof(struct ibmvnic_rx_buff),
+ GFP_KERNEL);
+ if (!rx_pool->rx_buff) {
+ dev_err(dev, "Couldn't alloc rx buffers\n");
+ release_rx_pools(adapter);
+ return -1;
+ }
+
+ if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size)) {
+ release_rx_pools(adapter);
+ return -1;
}
+
+ for (j = 0; j < rx_pool->size; ++j)
+ rx_pool->free_map[j] = j;
+
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ }
+
+ return 0;
+}
+
+static void release_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_tx_pool *tx_pool;
+ int i, tx_scrqs;
+
+ if (!adapter->tx_pool)
+ return;
+
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+ kfree(tx_pool->tx_buff);
+ free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ kfree(tx_pool->free_map);
}
- adapter->tx_pool =
- kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+ kfree(adapter->tx_pool);
+ adapter->tx_pool = NULL;
+}
+
+static int init_tx_pools(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_tx_pool *tx_pool;
+ int tx_subcrqs;
+ int i, j;
+
+ tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ adapter->tx_pool = kcalloc(tx_subcrqs,
+ sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
- goto tx_pool_arr_alloc_failed;
+ return -1;
+
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
- tx_pool->tx_buff =
- kcalloc(adapter->req_tx_entries_per_subcrq,
- sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
- if (!tx_pool->tx_buff)
- goto tx_pool_alloc_failed;
+ tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
+ sizeof(struct ibmvnic_tx_buff),
+ GFP_KERNEL);
+ if (!tx_pool->tx_buff) {
+ dev_err(dev, "tx pool buffer allocation failed\n");
+ release_tx_pools(adapter);
+ return -1;
+ }
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
adapter->req_tx_entries_per_subcrq *
- adapter->req_mtu))
- goto tx_ltb_alloc_failed;
+ adapter->req_mtu)) {
+ release_tx_pools(adapter);
+ return -1;
+ }
- tx_pool->free_map =
- kcalloc(adapter->req_tx_entries_per_subcrq,
- sizeof(int), GFP_KERNEL);
- if (!tx_pool->free_map)
- goto tx_fm_alloc_failed;
+ tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
+ sizeof(int), GFP_KERNEL);
+ if (!tx_pool->free_map) {
+ release_tx_pools(adapter);
+ return -1;
+ }
for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;
@@ -441,20 +499,183 @@ static int ibmvnic_open(struct net_device *netdev)
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
}
- adapter->bounce_buffer_size =
- (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
- adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
- GFP_KERNEL);
- if (!adapter->bounce_buffer)
- goto bounce_alloc_failed;
- adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
- adapter->bounce_buffer_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dev_err(dev, "Couldn't map tx bounce buffer\n");
- goto bounce_map_failed;
+ return 0;
+}
+
+static void release_error_buffers(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_error_buff *error_buff, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
+ list_del(&error_buff->list);
+ dma_unmap_single(dev, error_buff->dma, error_buff->len,
+ DMA_FROM_DEVICE);
+ kfree(error_buff->buff);
+ kfree(error_buff);
+ }
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
+}
+
+static int ibmvnic_login(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long timeout = msecs_to_jiffies(30000);
+ struct device *dev = &adapter->vdev->dev;
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs(adapter);
+
+ reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Capabilities query timeout\n");
+ return -1;
+ }
+ }
+
+ reinit_completion(&adapter->init_done);
+ send_login(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Login timeout\n");
+ return -1;
+ }
+ } while (adapter->renegotiate);
+
+ return 0;
+}
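ibmvnic_login() above follows the driver's usual CRQ request/response
handshake: rearm the completion, send the request, and bound the wait at
30 seconds. A stripped-down sketch of that handshake; the example_* names
and the 'done' member are illustrative assumptions:

static int example_request_wait(struct example_adapter *ad)
{
	unsigned long timeout = msecs_to_jiffies(30000);

	reinit_completion(&ad->done);	/* rearm before sending */
	example_send_request(ad);	/* response handler calls complete() */

	if (!wait_for_completion_timeout(&ad->done, timeout))
		return -ETIMEDOUT;	/* the device never answered */

	return 0;
}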
+
+static void release_resources(struct ibmvnic_adapter *adapter)
+{
+ release_tx_pools(adapter);
+ release_rx_pools(adapter);
+
+ release_stats_token(adapter);
+ release_error_buffers(adapter);
+}
+
+static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ union ibmvnic_crq crq;
+ bool resend;
+ int rc;
+
+ if (adapter->logical_link_state == link_state) {
+ netdev_dbg(netdev, "Link state already %d\n", link_state);
+ return 0;
+ }
+
+ netdev_err(netdev, "setting link state %d\n", link_state);
+ memset(&crq, 0, sizeof(crq));
+ crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
+ crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
+ crq.logical_link_state.link_state = link_state;
+
+ do {
+ resend = false;
+
+ reinit_completion(&adapter->init_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ netdev_err(netdev, "Failed to set link state\n");
+ return rc;
+ }
+
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ netdev_err(netdev, "timeout setting link state\n");
+ return -1;
+ }
+
+ if (adapter->init_done_rc == 1) {
+			/* Partial success, delay and resend */
+ mdelay(1000);
+ resend = true;
+ }
+ } while (resend);
+
+ return 0;
+}
+
+static int set_real_num_queues(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
+ if (rc) {
+ netdev_err(netdev, "failed to set the number of tx queues\n");
+ return rc;
+ }
+
+ rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
+ if (rc)
+ netdev_err(netdev, "failed to set the number of rx queues\n");
+
+ return rc;
+}
+
+static int ibmvnic_open(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ int rc = 0;
+ int i;
+
+ if (adapter->is_closed) {
+ rc = ibmvnic_init(adapter);
+ if (rc)
+ return rc;
}
+
+ rc = ibmvnic_login(netdev);
+ if (rc)
+ return rc;
+
+ rc = set_real_num_queues(netdev);
+ if (rc)
+ return rc;
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "failed to initialize sub crq irqs\n");
+ return -1;
+ }
+
+ rc = init_stats_token(adapter);
+ if (rc)
+ return rc;
+
+ adapter->map_id = 1;
+ adapter->napi = kcalloc(adapter->req_rx_queues,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!adapter->napi)
+ goto ibmvnic_open_fail;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&adapter->napi[i]);
+ }
+
+ send_map_query(adapter);
+
+ rc = init_rx_pools(netdev);
+ if (rc)
+ goto ibmvnic_open_fail;
+
+ rc = init_tx_pools(netdev);
+ if (rc)
+ goto ibmvnic_open_fail;
+
replenish_pools(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and
@@ -466,106 +687,63 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < adapter->req_tx_queues; i++)
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
- memset(&crq, 0, sizeof(crq));
- crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
- crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
- crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
- ibmvnic_send_crq(adapter, &crq);
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
+ if (rc)
+ goto ibmvnic_open_fail;
netif_tx_start_all_queues(netdev);
+ adapter->is_closed = false;
return 0;
-bounce_map_failed:
- kfree(adapter->bounce_buffer);
-bounce_alloc_failed:
- i = tx_subcrqs - 1;
- kfree(adapter->tx_pool[i].free_map);
-tx_fm_alloc_failed:
- free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
-tx_ltb_alloc_failed:
- kfree(adapter->tx_pool[i].tx_buff);
-tx_pool_alloc_failed:
- for (j = 0; j < i; j++) {
- kfree(adapter->tx_pool[j].tx_buff);
- free_long_term_buff(adapter,
- &adapter->tx_pool[j].long_term_buff);
- kfree(adapter->tx_pool[j].free_map);
- }
- kfree(adapter->tx_pool);
- adapter->tx_pool = NULL;
-tx_pool_arr_alloc_failed:
- i = rxadd_subcrqs;
-rx_pool_alloc_failed:
- for (j = 0; j < i; j++) {
- free_rx_pool(adapter, &adapter->rx_pool[j]);
- free_long_term_buff(adapter,
- &adapter->rx_pool[j].long_term_buff);
- }
- kfree(adapter->rx_pool);
- adapter->rx_pool = NULL;
-rx_pool_arr_alloc_failed:
+ibmvnic_open_fail:
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
-alloc_napi_failed:
+ release_resources(adapter);
return -ENOMEM;
}
+static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ disable_irq(adapter->tx_scrq[i]->irq);
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ disable_irq(adapter->rx_scrq[i]->irq);
+ }
+}
+
static int ibmvnic_close(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq crq;
+ int rc = 0;
int i;
adapter->closing = true;
+ disable_sub_crqs(adapter);
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
+ if (adapter->napi) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+ }
if (!adapter->failover)
netif_tx_stop_all_queues(netdev);
- if (adapter->bounce_buffer) {
- if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->bounce_buffer_dma,
- adapter->bounce_buffer_size,
- DMA_BIDIRECTIONAL);
- adapter->bounce_buffer_dma = DMA_ERROR_CODE;
- }
- kfree(adapter->bounce_buffer);
- adapter->bounce_buffer = NULL;
- }
-
- memset(&crq, 0, sizeof(crq));
- crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
- crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
- crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
- ibmvnic_send_crq(adapter, &crq);
-
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- i++) {
- kfree(adapter->tx_pool[i].tx_buff);
- free_long_term_buff(adapter,
- &adapter->tx_pool[i].long_term_buff);
- kfree(adapter->tx_pool[i].free_map);
- }
- kfree(adapter->tx_pool);
- adapter->tx_pool = NULL;
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
- free_rx_pool(adapter, &adapter->rx_pool[i]);
- free_long_term_buff(adapter,
- &adapter->rx_pool[i].long_term_buff);
- }
- kfree(adapter->rx_pool);
- adapter->rx_pool = NULL;
+ release_resources(adapter);
+ adapter->is_closed = true;
adapter->closing = false;
-
- return 0;
+ return rc;
}
/**
@@ -714,7 +892,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
- bool used_bounce = false;
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
@@ -731,9 +908,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
be32_to_cpu(adapter->login_rsp_buf->
off_txsubm_subcrqs));
if (adapter->migrated) {
+ if (!netif_subqueue_stopped(netdev, skb))
+ netif_stop_subqueue(netdev, queue_num);
+ dev_kfree_skb_any(skb);
+
tx_send_failed++;
tx_dropped++;
- ret = NETDEV_TX_BUSY;
+ ret = NETDEV_TX_OK;
goto out;
}
@@ -755,7 +936,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->index = index;
tx_buff->pool_index = queue_num;
tx_buff->last_frag = true;
- tx_buff->used_bounce = used_bounce;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -800,11 +980,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
sizeof(tx_buff->indir_arr),
DMA_TO_DEVICE);
if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+ dev_kfree_skb_any(skb);
+ tx_buff->skb = NULL;
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(dev, "tx: unable to map descriptor array\n");
tx_map_failed++;
tx_dropped++;
- ret = NETDEV_TX_BUSY;
+ ret = NETDEV_TX_OK;
goto out;
}
lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
@@ -823,15 +1005,20 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool->consumer_index--;
+ dev_kfree_skb_any(skb);
+ tx_buff->skb = NULL;
+
+ if (lpar_rc == H_CLOSED)
+ netif_stop_subqueue(netdev, queue_num);
+
tx_send_failed++;
tx_dropped++;
- ret = NETDEV_TX_BUSY;
+ ret = NETDEV_TX_OK;
goto out;
}
- atomic_inc(&tx_scrq->used);
-
- if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+ if (atomic_inc_return(&tx_scrq->used)
+ >= adapter->req_tx_entries_per_subcrq) {
netdev_info(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
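The transmit error paths above switch from NETDEV_TX_BUSY to freeing the skb
and returning NETDEV_TX_OK; returning BUSY would make the core requeue the
same skb indefinitely. The drop-and-report-OK pattern in isolation, with
hypothetical example_* helpers:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (example_queue_unusable(ndev)) {	/* e.g. adapter migrated */
		netif_stop_subqueue(ndev, skb->queue_mapping);
		dev_kfree_skb_any(skb);		/* the skb is ours to free */
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;		/* handled; do not requeue */
	}

	return example_xmit_hw(skb, ndev);
}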
@@ -975,7 +1162,15 @@ restart_poll:
skb = rx_buff->skb;
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
- skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
+
+ /* VLAN Header has been stripped by the system firmware and
+ * needs to be inserted by the driver
+ */
+ if (adapter->rx_vlan_header_insertion &&
+ (flags & IBMVNIC_VLAN_STRIPPED))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ ntohs(next->rx_comp.vlan_tci));
+
/* free the entry */
next->rx_comp.first = 0;
remove_buff_from_pool(adapter, rx_buff);
@@ -1176,6 +1371,12 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
scrq->crq_num);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Failed to release sub-CRQ %16lx, rc = %ld\n",
+ scrq->crq_num, rc);
+ }
+
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
@@ -1189,12 +1390,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
struct ibmvnic_sub_crq_queue *scrq;
int rc;
- scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
+ scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
- memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ scrq->msgs =
+ (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
goto zero_page_failed;
@@ -1222,9 +1423,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
- scrq->cur = 0;
- atomic_set(&scrq->used, 0);
- scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);
netdev_dbg(adapter->netdev,
@@ -1249,49 +1447,40 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
int i;
if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++)
- if (adapter->tx_scrq[i]) {
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ if (!adapter->tx_scrq[i])
+ continue;
+
+ if (adapter->tx_scrq[i]->irq) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
irq_dispose_mapping(adapter->tx_scrq[i]->irq);
- release_sub_crq_queue(adapter,
- adapter->tx_scrq[i]);
+ adapter->tx_scrq[i]->irq = 0;
}
+
+ release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ }
+
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
}
if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- if (adapter->rx_scrq[i]) {
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ if (!adapter->rx_scrq[i])
+ continue;
+
+ if (adapter->rx_scrq[i]->irq) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
irq_dispose_mapping(adapter->rx_scrq[i]->irq);
- release_sub_crq_queue(adapter,
- adapter->rx_scrq[i]);
+ adapter->rx_scrq[i]->irq = 0;
}
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
- }
-}
-
-static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
-{
- int i;
- if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++)
- if (adapter->tx_scrq[i])
- release_sub_crq_queue(adapter,
- adapter->tx_scrq[i]);
- adapter->tx_scrq = NULL;
- }
+ release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ }
- if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- if (adapter->rx_scrq[i])
- release_sub_crq_queue(adapter,
- adapter->rx_scrq[i]);
+ kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
}
}
@@ -1358,7 +1547,6 @@ restart_loop:
continue;
txbuff->data_dma[j] = 0;
- txbuff->used_bounce = false;
}
/* if sub_crq was sent indirectly */
first = txbuff->indir_arr[0].generic.first;
@@ -1369,9 +1557,8 @@ restart_loop:
}
if (txbuff->last_frag) {
- atomic_dec(&scrq->used);
-
- if (atomic_read(&scrq->used) <=
+ if (atomic_sub_return(next->tx_comp.num_comps,
+ &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
netif_subqueue_stopped(adapter->netdev,
txbuff->skb)) {
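
The completion path above pairs with the xmit-side stop: entries are reclaimed with atomic_sub_return() and the queue is woken only once occupancy drains to half the limit, giving stop/wake hysteresis so the queue does not flap. A small self-contained model of that behavior (limit and names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LIMIT 128

static atomic_int used;
static bool stopped;

static void tx_submit(void)
{
	if (atomic_fetch_add(&used, 1) + 1 >= LIMIT)
		stopped = true;			/* netif_stop_subqueue() */
}

static void tx_complete(int num_comps)
{
	/* fetch_sub returns the old count; subtract again for the new one */
	if (atomic_fetch_sub(&used, num_comps) - num_comps <= LIMIT / 2 &&
	    stopped) {
		stopped = false;		/* netif_wake_subqueue() */
		printf("queue woken at %d entries\n", atomic_load(&used));
	}
}

int main(void)
{
	for (int i = 0; i < LIMIT; i++)
		tx_submit();			/* fills and stops the queue */
	tx_complete(LIMIT / 2 + 1);		/* drains past half: wakes */
	return 0;
}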
@@ -1487,52 +1674,24 @@ req_tx_irq_failed:
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
}
- release_sub_crqs_no_irqs(adapter);
+ release_sub_crqs(adapter);
return rc;
}
-static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
+static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_sub_crq_queue **allqueues;
int registered_queues = 0;
- union ibmvnic_crq crq;
int total_queues;
int more = 0;
int i;
- if (!retry) {
- /* Sub-CRQ entries are 32 byte long */
- int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
-
- if (adapter->min_tx_entries_per_subcrq > entries_page ||
- adapter->min_rx_add_entries_per_subcrq > entries_page) {
- dev_err(dev, "Fatal, invalid entries per sub-crq\n");
- goto allqueues_failed;
- }
-
- /* Get the minimum between the queried max and the entries
- * that fit in our PAGE_SIZE
- */
- adapter->req_tx_entries_per_subcrq =
- adapter->max_tx_entries_per_subcrq > entries_page ?
- entries_page : adapter->max_tx_entries_per_subcrq;
- adapter->req_rx_add_entries_per_subcrq =
- adapter->max_rx_add_entries_per_subcrq > entries_page ?
- entries_page : adapter->max_rx_add_entries_per_subcrq;
-
- adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
- adapter->req_rx_queues = adapter->opt_rx_comp_queues;
- adapter->req_rx_add_queues = adapter->max_rx_add_queues;
-
- adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
- }
-
total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
- allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
+ allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
- goto allqueues_failed;
+ return -1;
for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter);
@@ -1570,7 +1729,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
}
adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
- sizeof(*adapter->tx_scrq), GFP_ATOMIC);
+ sizeof(*adapter->tx_scrq), GFP_KERNEL);
if (!adapter->tx_scrq)
goto tx_failed;
@@ -1580,7 +1739,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
- sizeof(*adapter->rx_scrq), GFP_ATOMIC);
+ sizeof(*adapter->rx_scrq), GFP_KERNEL);
if (!adapter->rx_scrq)
goto rx_failed;
@@ -1589,6 +1748,51 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
adapter->rx_scrq[i]->scrq_num = i;
}
+ kfree(allqueues);
+ return 0;
+
+rx_failed:
+ kfree(adapter->tx_scrq);
+ adapter->tx_scrq = NULL;
+tx_failed:
+ for (i = 0; i < registered_queues; i++)
+ release_sub_crq_queue(adapter, allqueues[i]);
+ kfree(allqueues);
+ return -1;
+}
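
init_sub_crqs() now reports failure to its caller instead of tearing the whole device down, using the usual reverse-order goto unwind: each label releases exactly what the earlier stages allocated. A compact userspace analog of that cleanup shape (allocation sizes are placeholders):

#include <stdlib.h>
#include <stdio.h>

static int setup(void)
{
	void *allqueues, *tx, *rx;

	allqueues = malloc(64);		/* scratch list, freed on both paths */
	if (!allqueues)
		return -1;
	tx = malloc(64);		/* stands in for adapter->tx_scrq */
	if (!tx)
		goto tx_failed;
	rx = malloc(64);		/* stands in for adapter->rx_scrq */
	if (!rx)
		goto rx_failed;

	free(allqueues);
	free(rx);			/* demo only: the driver keeps these */
	free(tx);
	return 0;

rx_failed:
	free(tx);
tx_failed:
	free(allqueues);
	return -1;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}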
+
+static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+{
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+
+ if (!retry) {
+ /* Sub-CRQ entries are 32 byte long */
+ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+
+ if (adapter->min_tx_entries_per_subcrq > entries_page ||
+ adapter->min_rx_add_entries_per_subcrq > entries_page) {
+ dev_err(dev, "Fatal, invalid entries per sub-crq\n");
+ return;
+ }
+
+ /* Get the minimum between the queried max and the entries
+ * that fit in our PAGE_SIZE
+ */
+ adapter->req_tx_entries_per_subcrq =
+ adapter->max_tx_entries_per_subcrq > entries_page ?
+ entries_page : adapter->max_tx_entries_per_subcrq;
+ adapter->req_rx_add_entries_per_subcrq =
+ adapter->max_rx_add_entries_per_subcrq > entries_page ?
+ entries_page : adapter->max_rx_add_entries_per_subcrq;
+
+ adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+ adapter->req_rx_queues = adapter->opt_rx_comp_queues;
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+
+ adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
+ }
+
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
@@ -1642,20 +1846,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
-
- kfree(allqueues);
-
- return;
-
-rx_failed:
- kfree(adapter->tx_scrq);
- adapter->tx_scrq = NULL;
-tx_failed:
- for (i = 0; i < registered_queues; i++)
- release_sub_crq_queue(adapter, allqueues[i]);
- kfree(allqueues);
-allqueues_failed:
- ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -1829,13 +2019,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
struct ibmvnic_login_buffer *login_buffer;
- struct ibmvnic_inflight_cmd *inflight_cmd;
struct device *dev = &adapter->vdev->dev;
dma_addr_t rsp_buffer_token;
dma_addr_t buffer_token;
size_t rsp_buffer_size;
union ibmvnic_crq crq;
- unsigned long flags;
size_t buffer_size;
__be64 *tx_list_p;
__be64 *rx_list_p;
@@ -1872,11 +2060,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
dev_err(dev, "Couldn't map login rsp buffer\n");
goto buf_rsp_map_failed;
}
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dev_err(dev, "Couldn't allocate inflight_cmd\n");
- goto inflight_alloc_failed;
- }
+
adapter->login_buf = login_buffer;
adapter->login_buf_token = buffer_token;
adapter->login_buf_sz = buffer_size;
@@ -1927,20 +2111,10 @@ static void send_login(struct ibmvnic_adapter *adapter)
crq.login.cmd = LOGIN;
crq.login.ioba = cpu_to_be32(buffer_token);
crq.login.len = cpu_to_be32(buffer_size);
-
- memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
ibmvnic_send_crq(adapter, &crq);
return;
-inflight_alloc_failed:
- dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
- DMA_FROM_DEVICE);
buf_rsp_map_failed:
kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
@@ -2064,6 +2238,10 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
+ atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
+
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
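
Each query above increments running_cap_crqs before the CRQ is sent; the response handler decrements it and only advances initialization once the counter reaches zero. A sketch of that fan-out/fan-in counting (userspace analog, not the driver's exact accessors):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int running;

static void send_query(int cap)
{
	atomic_fetch_add(&running, 1);	/* before the async send */
	(void)cap;			/* ibmvnic_send_crq() would go here */
}

static void handle_response(void)
{
	/* last response in: move to the next init stage */
	if (atomic_fetch_sub(&running, 1) - 1 == 0)
		printf("all capability responses in, next stage\n");
}

int main(void)
{
	for (int cap = 0; cap < 3; cap++)
		send_query(cap);
	for (int cap = 0; cap < 3; cap++)
		handle_response();
	return 0;
}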
@@ -2242,77 +2420,22 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
kfree(error_buff);
}
-static void handle_dump_size_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- int len = be32_to_cpu(crq->request_dump_size_rsp.len);
- struct ibmvnic_inflight_cmd *inflight_cmd;
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq newcrq;
- unsigned long flags;
-
- /* allocate and map buffer */
- adapter->dump_data = kmalloc(len, GFP_KERNEL);
- if (!adapter->dump_data) {
- complete(&adapter->fw_done);
- return;
- }
-
- adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, adapter->dump_data_token)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map dump data\n");
- kfree(adapter->dump_data);
- complete(&adapter->fw_done);
- return;
- }
-
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dma_unmap_single(dev, adapter->dump_data_token, len,
- DMA_FROM_DEVICE);
- kfree(adapter->dump_data);
- complete(&adapter->fw_done);
- return;
- }
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
- newcrq.request_dump.cmd = REQUEST_DUMP;
- newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
- newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
-
- memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
- ibmvnic_send_crq(adapter, &newcrq);
-}
-
-static void handle_error_indication(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
+static void request_error_information(struct ibmvnic_adapter *adapter,
+ union ibmvnic_crq *err_crq)
{
- int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
- struct ibmvnic_inflight_cmd *inflight_cmd;
struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
struct ibmvnic_error_buff *error_buff;
- union ibmvnic_crq new_crq;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ union ibmvnic_crq crq;
unsigned long flags;
-
- dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
- crq->error_indication.
- flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- be32_to_cpu(crq->error_indication.error_id),
- be16_to_cpu(crq->error_indication.error_cause));
+ int rc, detail_len;
error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
if (!error_buff)
return;
+ detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
if (!error_buff->buff) {
kfree(error_buff);
@@ -2322,43 +2445,61 @@ static void handle_error_indication(union ibmvnic_crq *crq,
error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, error_buff->dma)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map error buffer\n");
- kfree(error_buff->buff);
- kfree(error_buff);
- return;
- }
-
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dma_unmap_single(dev, error_buff->dma, detail_len,
- DMA_FROM_DEVICE);
+ netdev_err(netdev, "Couldn't map error buffer\n");
kfree(error_buff->buff);
kfree(error_buff);
return;
}
error_buff->len = detail_len;
- error_buff->error_id = crq->error_indication.error_id;
+ error_buff->error_id = err_crq->error_indication.error_id;
spin_lock_irqsave(&adapter->error_list_lock, flags);
list_add_tail(&error_buff->list, &adapter->errors);
spin_unlock_irqrestore(&adapter->error_list_lock, flags);
- memset(&new_crq, 0, sizeof(new_crq));
- new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
- new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
- new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
- new_crq.request_error_info.len = cpu_to_be32(detail_len);
- new_crq.request_error_info.error_id = crq->error_indication.error_id;
+ memset(&crq, 0, sizeof(crq));
+ crq.request_error_info.first = IBMVNIC_CRQ_CMD;
+ crq.request_error_info.cmd = REQUEST_ERROR_INFO;
+ crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
+ crq.request_error_info.len = cpu_to_be32(detail_len);
+ crq.request_error_info.error_id = err_crq->error_indication.error_id;
+
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ netdev_err(netdev, "failed to request error information\n");
+ goto err_info_fail;
+ }
+
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ netdev_err(netdev, "timeout waiting for error information\n");
+ goto err_info_fail;
+ }
+
+ return;
+
+err_info_fail:
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_del(&error_buff->list);
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
+
+ kfree(error_buff->buff);
+ kfree(error_buff);
+}
- memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
+static void handle_error_indication(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
+ dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
+ crq->error_indication.flags
+ & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+ be32_to_cpu(crq->error_indication.error_id),
+ be16_to_cpu(crq->error_indication.error_cause));
- ibmvnic_send_crq(adapter, &new_crq);
+ if (be32_to_cpu(crq->error_indication.error_id))
+ request_error_information(adapter, crq);
}
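
A failure detail worth noting in request_error_information() above: the error buffer is already linked on adapter->errors before the CRQ is sent, so the send-failure and timeout paths must unlink it under error_list_lock before freeing. A hedged userspace sketch of that ordering (a pthread mutex stands in for the spinlock; error handling elided):

#include <pthread.h>
#include <stdlib.h>

struct err_buff {
	struct err_buff *prev, *next;
	void *data;
};

static pthread_mutex_t error_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlink_and_free(struct err_buff *b)
{
	pthread_mutex_lock(&error_list_lock);
	b->prev->next = b->next;	/* list_del() */
	b->next->prev = b->prev;
	pthread_mutex_unlock(&error_list_lock);

	free(b->data);			/* kfree(error_buff->buff) */
	free(b);			/* kfree(error_buff) */
}

int main(void)
{
	struct err_buff head = { &head, &head, 0 };
	struct err_buff *b = malloc(sizeof(*b));

	b->data = malloc(16);
	b->prev = &head; b->next = &head;	/* list_add_tail() */
	head.next = b; head.prev = b;
	unlink_and_free(b);
	return 0;
}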
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -2428,9 +2569,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs_no_irqs(adapter);
+ release_sub_crqs(adapter);
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
- init_sub_crqs(adapter, 1);
+ ibmvnic_send_req_caps(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -2473,7 +2614,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
- union ibmvnic_crq crq;
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -2508,11 +2648,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
}
complete(&adapter->init_done);
- memset(&crq, 0, sizeof(crq));
- crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
- crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
- ibmvnic_send_crq(adapter, &crq);
-
return 0;
}
@@ -2687,6 +2822,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
adapter->vlan_header_insertion);
break;
+ case RX_VLAN_HEADER_INSERTION:
+ adapter->rx_vlan_header_insertion =
+ be64_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
+ adapter->rx_vlan_header_insertion);
+ break;
case MAX_TX_SG_ENTRIES:
adapter->max_tx_sg_entries =
be64_to_cpu(crq->query_capability.number);
@@ -2743,524 +2884,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_crqs) == 0) {
adapter->wait_capability = false;
- init_sub_crqs(adapter, 0);
- /* We're done querying the capabilities, initialize sub-crqs */
- }
-}
-
-static void handle_control_ras_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- u8 correlator = crq->control_ras_rsp.correlator;
- struct device *dev = &adapter->vdev->dev;
- bool found = false;
- int i;
-
- if (crq->control_ras_rsp.rc.code) {
- dev_warn(dev, "Control ras failed rc=%d\n",
- crq->control_ras_rsp.rc.code);
- return;
- }
-
- for (i = 0; i < adapter->ras_comp_num; i++) {
- if (adapter->ras_comps[i].correlator == correlator) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_warn(dev, "Correlator not found on control_ras_rsp\n");
- return;
- }
-
- switch (crq->control_ras_rsp.op) {
- case IBMVNIC_TRACE_LEVEL:
- adapter->ras_comps[i].trace_level = crq->control_ras.level;
- break;
- case IBMVNIC_ERROR_LEVEL:
- adapter->ras_comps[i].error_check_level =
- crq->control_ras.level;
- break;
- case IBMVNIC_TRACE_PAUSE:
- adapter->ras_comp_int[i].paused = 1;
- break;
- case IBMVNIC_TRACE_RESUME:
- adapter->ras_comp_int[i].paused = 0;
- break;
- case IBMVNIC_TRACE_ON:
- adapter->ras_comps[i].trace_on = 1;
- break;
- case IBMVNIC_TRACE_OFF:
- adapter->ras_comps[i].trace_on = 0;
- break;
- case IBMVNIC_CHG_TRACE_BUFF_SZ:
- /* trace_buff_sz is 3 bytes, stuff it into an int */
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
- crq->control_ras_rsp.trace_buff_sz[0];
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
- crq->control_ras_rsp.trace_buff_sz[1];
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
- crq->control_ras_rsp.trace_buff_sz[2];
- break;
- default:
- dev_err(dev, "invalid op %d on control_ras_rsp",
- crq->control_ras_rsp.op);
- }
-}
-
-static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
- loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_fw_trace_entry *trace;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- dma_addr_t trace_tok;
-
- if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
- return 0;
-
- trace =
- dma_alloc_coherent(dev,
- be32_to_cpu(adapter->ras_comps[num].
- trace_buff_size), &trace_tok,
- GFP_KERNEL);
- if (!trace) {
- dev_err(dev, "Couldn't alloc trace buffer\n");
- return 0;
+ ibmvnic_send_req_caps(adapter, 0);
}
-
- memset(&crq, 0, sizeof(crq));
- crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
- crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
- crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
- crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
- crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
-
- init_completion(&adapter->fw_done);
- ibmvnic_send_crq(adapter, &crq);
- wait_for_completion(&adapter->fw_done);
-
- if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
- len =
- be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
- *ppos;
-
- copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
-
- dma_free_coherent(dev,
- be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
- trace, trace_tok);
- *ppos += len;
- return len;
-}
-
-static const struct file_operations trace_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = trace_read,
-};
-
-static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
- loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* 1 or 0 plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
-
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t paused_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- adapter->ras_comp_int[num].paused = val ? 1 : 0;
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator = adapter->ras_comps[num].correlator;
- crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations paused_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = paused_read,
- .write = paused_write,
-};
-
-static ssize_t tracing_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* 1 or 0 plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
-
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t tracing_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator = adapter->ras_comps[num].correlator;
- crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
-
- return len;
-}
-
-static const struct file_operations tracing_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = tracing_read,
- .write = tracing_write,
-};
-
-static ssize_t error_level_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* decimal max char plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
-
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t error_level_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- if (val > 9)
- val = 9;
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator = adapter->ras_comps[num].correlator;
- crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
- crq.control_ras.level = val;
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations error_level_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = error_level_read,
- .write = error_level_write,
-};
-
-static ssize_t trace_level_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* decimal max char plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
- if (val > 9)
- val = 9;
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator =
- adapter->ras_comps[ras_comp_int->num].correlator;
- crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
- crq.control_ras.level = val;
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations trace_level_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = trace_level_read,
- .write = trace_level_write,
-};
-
-static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[9]; /* decimal max int plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t trace_buff_size_write(struct file *file,
- const char __user *user_buf, size_t len,
- loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator =
- adapter->ras_comps[ras_comp_int->num].correlator;
- crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
- /* trace_buff_sz is 3 bytes, stuff an int into it */
- crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
- crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
- crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations trace_size_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = trace_buff_size_read,
- .write = trace_buff_size_write,
-};
-
-static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct dentry *dir_ent;
- struct dentry *ent;
- int i;
-
- debugfs_remove_recursive(adapter->ras_comps_ent);
-
- adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
- adapter->debugfs_dir);
- if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
- dev_info(dev, "debugfs create ras_comps dir failed\n");
- return;
- }
-
- for (i = 0; i < adapter->ras_comp_num; i++) {
- dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
- adapter->ras_comps_ent);
- if (!dir_ent || IS_ERR(dir_ent)) {
- dev_info(dev, "debugfs create %s dir failed\n",
- adapter->ras_comps[i].name);
- continue;
- }
-
- adapter->ras_comp_int[i].adapter = adapter;
- adapter->ras_comp_int[i].num = i;
- adapter->ras_comp_int[i].desc_blob.data =
- &adapter->ras_comps[i].description;
- adapter->ras_comp_int[i].desc_blob.size =
- sizeof(adapter->ras_comps[i].description);
-
- /* Don't need to remember the dentry's because the debugfs dir
- * gets removed recursively
- */
- ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
- &adapter->ras_comp_int[i].desc_blob);
- ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
- dir_ent, &adapter->ras_comp_int[i],
- &trace_size_ops);
- ent = debugfs_create_file("trace_level",
- S_IRUGO |
- (adapter->ras_comps[i].trace_level !=
- 0xFF ? S_IWUSR : 0),
- dir_ent, &adapter->ras_comp_int[i],
- &trace_level_ops);
- ent = debugfs_create_file("error_level",
- S_IRUGO |
- (adapter->
- ras_comps[i].error_check_level !=
- 0xFF ? S_IWUSR : 0),
- dir_ent, &adapter->ras_comp_int[i],
- &trace_level_ops);
- ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
- dir_ent, &adapter->ras_comp_int[i],
- &tracing_ops);
- ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
- dir_ent, &adapter->ras_comp_int[i],
- &paused_ops);
- ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
- &adapter->ras_comp_int[i],
- &trace_ops);
- }
-}
-
-static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq newcrq;
-
- adapter->ras_comps = dma_alloc_coherent(dev, len,
- &adapter->ras_comps_tok,
- GFP_KERNEL);
- if (!adapter->ras_comps) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't alloc fw comps buffer\n");
- return;
- }
-
- adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
- sizeof(struct ibmvnic_fw_comp_internal),
- GFP_KERNEL);
- if (!adapter->ras_comp_int)
- dma_free_coherent(dev, len, adapter->ras_comps,
- adapter->ras_comps_tok);
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
- newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
- newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
- newcrq.request_ras_comps.len = cpu_to_be32(len);
- ibmvnic_send_crq(adapter, &newcrq);
-}
-
-static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp2;
- unsigned long flags;
- unsigned long flags2;
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
- switch (inflight_cmd->crq.generic.cmd) {
- case LOGIN:
- dma_unmap_single(dev, adapter->login_buf_token,
- adapter->login_buf_sz,
- DMA_BIDIRECTIONAL);
- dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz,
- DMA_BIDIRECTIONAL);
- kfree(adapter->login_rsp_buf);
- kfree(adapter->login_buf);
- break;
- case REQUEST_DUMP:
- complete(&adapter->fw_done);
- break;
- case REQUEST_ERROR_INFO:
- spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry_safe(error_buff, tmp2,
- &adapter->errors, list) {
- dma_unmap_single(dev, error_buff->dma,
- error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- list_del(&error_buff->list);
- kfree(error_buff);
- }
- spin_unlock_irqrestore(&adapter->error_list_lock,
- flags2);
- break;
- }
- list_del(&inflight_cmd->list);
- kfree(inflight_cmd);
- }
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
static void ibmvnic_xport_event(struct work_struct *work)
@@ -3271,7 +2896,6 @@ static void ibmvnic_xport_event(struct work_struct *work)
struct device *dev = &adapter->vdev->dev;
long rc;
- ibmvnic_free_inflight(adapter);
release_sub_crqs(adapter);
if (adapter->migrated) {
rc = ibmvnic_reenable_crq_queue(adapter);
@@ -3290,11 +2914,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_generic_crq *gen_crq = &crq->generic;
struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->vdev->dev;
+ u64 *u64_crq = (u64 *)crq;
long rc;
netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
- ((unsigned long int *)crq)[0],
- ((unsigned long int *)crq)[1]);
+ (unsigned long int)cpu_to_be64(u64_crq[0]),
+ (unsigned long int)cpu_to_be64(u64_crq[1]));
switch (gen_crq->first) {
case IBMVNIC_CRQ_INIT_RSP:
switch (gen_crq->cmd) {
@@ -3374,9 +2999,14 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
handle_login_rsp(crq, adapter);
break;
case LOGICAL_LINK_STATE_RSP:
- netdev_dbg(netdev, "Got Logical Link State Response\n");
+ netdev_dbg(netdev,
+ "Got Logical Link State Response, state: %d rc: %d\n",
+ crq->logical_link_state_rsp.link_state,
+ crq->logical_link_state_rsp.rc.code);
adapter->logical_link_state =
crq->logical_link_state_rsp.link_state;
+ adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
+ complete(&adapter->init_done);
break;
case LINK_STATE_INDICATION:
netdev_dbg(netdev, "Got Logical Link State Indication\n");
@@ -3401,14 +3031,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Statistics Response\n");
complete(&adapter->stats_done);
break;
- case REQUEST_DUMP_SIZE_RSP:
- netdev_dbg(netdev, "Got Request Dump Size Response\n");
- handle_dump_size_rsp(crq, adapter);
- break;
- case REQUEST_DUMP_RSP:
- netdev_dbg(netdev, "Got Request Dump Response\n");
- complete(&adapter->fw_done);
- break;
case QUERY_IP_OFFLOAD_RSP:
netdev_dbg(netdev, "Got Query IP offload Response\n");
handle_query_ip_offload_rsp(adapter);
@@ -3421,26 +3043,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
sizeof(adapter->ip_offload_ctrl),
DMA_TO_DEVICE);
- /* We're done with the queries, perform the login */
- send_login(adapter);
- break;
- case REQUEST_RAS_COMP_NUM_RSP:
- netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
- if (crq->request_ras_comp_num_rsp.rc.code == 10) {
- netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
- break;
- }
- adapter->ras_comp_num =
- be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
- handle_request_ras_comp_num_rsp(crq, adapter);
- break;
- case REQUEST_RAS_COMPS_RSP:
- netdev_dbg(netdev, "Got Request RAS Comps Response\n");
- handle_request_ras_comps_rsp(crq, adapter);
- break;
- case CONTROL_RAS_RSP:
- netdev_dbg(netdev, "Got Control RAS Response\n");
- handle_control_ras_rsp(crq, adapter);
+ complete(&adapter->init_done);
break;
case COLLECT_FW_TRACE_RSP:
netdev_dbg(netdev, "Got Collect firmware trace Response\n");
@@ -3455,12 +3058,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
struct ibmvnic_adapter *adapter = instance;
- unsigned long flags;
- spin_lock_irqsave(&adapter->crq.lock, flags);
- vio_disable_interrupts(adapter->vdev);
tasklet_schedule(&adapter->tasklet);
- spin_unlock_irqrestore(&adapter->crq.lock, flags);
return IRQ_HANDLED;
}
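
The interrupt handler above is reduced to scheduling the tasklet; all CRQ draining and lock handling now lives in ibmvnic_tasklet() under queue->lock. A toy model of that top-half/bottom-half split (flags in place of real deferral machinery):

#include <stdbool.h>
#include <stdio.h>

static bool tasklet_pending;
static int crq_backlog;

static void fake_irq(void)
{
	tasklet_pending = true;		/* tasklet_schedule() */
}

static void fake_tasklet(void)
{
	while (crq_backlog > 0) {	/* ibmvnic_next_crq() loop */
		crq_backlog--;
		printf("handled one CRQ\n");
	}
	tasklet_pending = false;
}

int main(void)
{
	crq_backlog = 3;
	fake_irq();			/* top half: cheap and fast */
	if (tasklet_pending)
		fake_tasklet();		/* bottom half: does the work */
	return 0;
}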
@@ -3468,32 +3067,23 @@ static void ibmvnic_tasklet(void *data)
{
struct ibmvnic_adapter *adapter = data;
struct ibmvnic_crq_queue *queue = &adapter->crq;
- struct vio_dev *vdev = adapter->vdev;
union ibmvnic_crq *crq;
unsigned long flags;
bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- vio_disable_interrupts(vdev);
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
- vio_enable_interrupts(vdev);
- crq = ibmvnic_next_crq(adapter);
- if (crq) {
- vio_disable_interrupts(vdev);
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- } else {
- /* remain in tasklet until all
- * capabilities responses are received
- */
- if (!adapter->wait_capability)
- done = true;
- }
+
+ /* remain in tasklet until all
+ * capabilities responses are received
+ */
+ if (!adapter->wait_capability)
+ done = true;
}
/* if capabilities CRQ's were sent in this tasklet, the following
* tasklet must wait until all responses are received
@@ -3547,12 +3137,15 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
return rc;
}
-static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
+static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_crq_queue *crq = &adapter->crq;
struct vio_dev *vdev = adapter->vdev;
long rc;
+ if (!crq->msgs)
+ return;
+
netdev_dbg(adapter->netdev, "Releasing CRQ\n");
free_irq(vdev->irq, adapter);
tasklet_kill(&adapter->tasklet);
@@ -3563,15 +3156,19 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_page((unsigned long)crq->msgs);
+ crq->msgs = NULL;
}
-static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
+static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_crq_queue *crq = &adapter->crq;
struct device *dev = &adapter->vdev->dev;
struct vio_dev *vdev = adapter->vdev;
int rc, retrc = -ENOMEM;
+ if (crq->msgs)
+ return 0;
+
crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
/* Should we allocate more than one page? */
@@ -3633,48 +3230,10 @@ reg_crq_failed:
dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)crq->msgs);
+ crq->msgs = NULL;
return retrc;
}
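
The crq->msgs guards added in this pair of hunks make init_crq_queue()/release_crq_queue() idempotent: release is a no-op once msgs is freed and cleared, and init refuses to double-allocate. The pattern in miniature:

#include <stdlib.h>
#include <stdio.h>

struct crq_queue { void *msgs; };

static int init_queue(struct crq_queue *crq)
{
	if (crq->msgs)
		return 0;		/* already initialized */
	crq->msgs = calloc(1, 4096);
	return crq->msgs ? 0 : -1;
}

static void release_queue(struct crq_queue *crq)
{
	if (!crq->msgs)
		return;			/* already released */
	free(crq->msgs);
	crq->msgs = NULL;		/* the key step: arm the guard */
}

int main(void)
{
	struct crq_queue crq = { 0 };

	init_queue(&crq);
	release_queue(&crq);
	release_queue(&crq);		/* safe: second call is a no-op */
	printf("double release ok\n");
	return 0;
}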
-/* debugfs for dump */
-static int ibmvnic_dump_show(struct seq_file *seq, void *v)
-{
- struct net_device *netdev = seq->private;
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
- crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
-
- init_completion(&adapter->fw_done);
- ibmvnic_send_crq(adapter, &crq);
- wait_for_completion(&adapter->fw_done);
-
- seq_write(seq, adapter->dump_data, adapter->dump_data_size);
-
- dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
- DMA_BIDIRECTIONAL);
-
- kfree(adapter->dump_data);
-
- return 0;
-}
-
-static int ibmvnic_dump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ibmvnic_dump_show, inode->i_private);
-}
-
-static const struct file_operations ibmvnic_dump_ops = {
- .owner = THIS_MODULE,
- .open = ibmvnic_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void handle_crq_init_rsp(struct work_struct *work)
{
struct ibmvnic_adapter *adapter = container_of(work,
@@ -3702,26 +3261,6 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
}
- do {
- if (adapter->renegotiate) {
- adapter->renegotiate = false;
- release_sub_crqs_no_irqs(adapter);
-
- reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done,
- timeout)) {
- dev_err(dev, "Passive init timeout\n");
- goto task_failed;
- }
- }
- } while (adapter->renegotiate);
- rc = init_sub_crq_irqs(adapter);
-
- if (rc)
- goto task_failed;
-
- netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu - ETH_HLEN;
if (adapter->failover) {
@@ -3753,14 +3292,40 @@ task_failed:
dev_err(dev, "Passive initialization was not successful\n");
}
-static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
+ struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+ return rc;
+ }
+
+ init_completion(&adapter->init_done);
+ ibmvnic_send_crq_init(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Initialization sequence timed out\n");
+ release_crq_queue(adapter);
+ return -1;
+ }
+
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev, "Initialization of sub crqs failed\n");
+ release_crq_queue(adapter);
+ }
+
+ return rc;
+}
+
+static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
- struct dentry *ent;
- char buf[17]; /* debugfs name buf */
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3798,91 +3363,27 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->stats_lock);
- rc = ibmvnic_init_crq_queue(adapter);
- if (rc) {
- dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
- goto free_netdev;
- }
-
INIT_LIST_HEAD(&adapter->errors);
- INIT_LIST_HEAD(&adapter->inflight);
spin_lock_init(&adapter->error_list_lock);
- spin_lock_init(&adapter->inflight_lock);
-
- adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
- sizeof(struct ibmvnic_statistics),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(&dev->dev, "Couldn't map stats buffer\n");
- rc = -ENOMEM;
- goto free_crq;
- }
-
- snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
- ent = debugfs_create_dir(buf, NULL);
- if (!ent || IS_ERR(ent)) {
- dev_info(&dev->dev, "debugfs create directory failed\n");
- adapter->debugfs_dir = NULL;
- } else {
- adapter->debugfs_dir = ent;
- ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
- netdev, &ibmvnic_dump_ops);
- if (!ent || IS_ERR(ent)) {
- dev_info(&dev->dev,
- "debugfs create dump file failed\n");
- adapter->debugfs_dump = NULL;
- } else {
- adapter->debugfs_dump = ent;
- }
- }
- init_completion(&adapter->init_done);
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout))
- return 0;
-
- do {
- if (adapter->renegotiate) {
- adapter->renegotiate = false;
- release_sub_crqs_no_irqs(adapter);
-
- reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done,
- timeout))
- return 0;
- }
- } while (adapter->renegotiate);
-
- rc = init_sub_crq_irqs(adapter);
+ rc = ibmvnic_init(adapter);
if (rc) {
- dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
- goto free_debugfs;
+ free_netdev(netdev);
+ return rc;
}
- netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ adapter->is_closed = false;
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_sub_crqs;
+ free_netdev(netdev);
+ return rc;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
-
-free_sub_crqs:
- release_sub_crqs(adapter);
-free_debugfs:
- if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
- debugfs_remove_recursive(adapter->debugfs_dir);
-free_crq:
- ibmvnic_release_crq_queue(adapter);
-free_netdev:
- free_netdev(netdev);
- return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
@@ -3892,23 +3393,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
unregister_netdev(netdev);
+ release_resources(adapter);
release_sub_crqs(adapter);
-
- ibmvnic_release_crq_queue(adapter);
-
- if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
- debugfs_remove_recursive(adapter->debugfs_dir);
-
- dma_unmap_single(&dev->dev, adapter->stats_token,
- sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
-
- if (adapter->ras_comps)
- dma_free_coherent(&dev->dev,
- adapter->ras_comp_num *
- sizeof(struct ibmvnic_fw_component),
- adapter->ras_comps, adapter->ras_comps_tok);
-
- kfree(adapter->ras_comp_int);
+ release_crq_queue(adapter);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
@@ -3933,7 +3420,6 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
adapter = netdev_priv(netdev);
ret += PAGE_SIZE; /* the crq message queue */
- ret += adapter->bounce_buffer_size;
ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1993b42666f7..a69979f6f19d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -518,8 +518,8 @@ struct ibmvnic_change_mac_addr {
u8 first;
u8 cmd;
u8 mac_addr[6];
- struct ibmvnic_rc rc;
u8 reserved[4];
+ struct ibmvnic_rc rc;
} __packed __aligned(8);
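
On the header change above: in a __packed 16-byte CRQ format, declaration order is wire order, so swapping rc and reserved moves the return code from offset 8 to offset 12 (assuming the 4-byte ibmvnic_rc implied by the 16-byte total). A quick offsetof() check with stand-in types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct ibmvnic_rc, assumed 4 bytes here. */
struct rc4 { uint8_t code; uint8_t detail[3]; } __attribute__((packed));

struct mac_old {
	uint8_t first, cmd, mac[6];
	struct rc4 rc;
	uint8_t reserved[4];
} __attribute__((packed));

struct mac_new {
	uint8_t first, cmd, mac[6];
	uint8_t reserved[4];
	struct rc4 rc;
} __attribute__((packed));

int main(void)
{
	printf("old rc offset = %zu, new rc offset = %zu\n",
	       offsetof(struct mac_old, rc),	/* 8 */
	       offsetof(struct mac_new, rc));	/* 12 */
	return 0;
}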
struct ibmvnic_multicast_ctrl {
@@ -733,6 +733,7 @@ enum ibmvnic_capabilities {
REQ_MTU = 21,
MAX_MULTICAST_FILTERS = 22,
VLAN_HEADER_INSERTION = 23,
+ RX_VLAN_HEADER_INSERTION = 24,
MAX_TX_SG_ENTRIES = 25,
RX_SG_SUPPORTED = 26,
RX_SG_REQUESTED = 27,
@@ -772,20 +773,10 @@ enum ibmvnic_commands {
ERROR_INDICATION = 0x08,
REQUEST_ERROR_INFO = 0x09,
REQUEST_ERROR_RSP = 0x89,
- REQUEST_DUMP_SIZE = 0x0A,
- REQUEST_DUMP_SIZE_RSP = 0x8A,
- REQUEST_DUMP = 0x0B,
- REQUEST_DUMP_RSP = 0x8B,
LOGICAL_LINK_STATE = 0x0C,
LOGICAL_LINK_STATE_RSP = 0x8C,
REQUEST_STATISTICS = 0x0D,
REQUEST_STATISTICS_RSP = 0x8D,
- REQUEST_RAS_COMP_NUM = 0x0E,
- REQUEST_RAS_COMP_NUM_RSP = 0x8E,
- REQUEST_RAS_COMPS = 0x0F,
- REQUEST_RAS_COMPS_RSP = 0x8F,
- CONTROL_RAS = 0x10,
- CONTROL_RAS_RSP = 0x90,
COLLECT_FW_TRACE = 0x11,
COLLECT_FW_TRACE_RSP = 0x91,
LINK_STATE_INDICATION = 0x12,
@@ -806,8 +797,6 @@ enum ibmvnic_commands {
ACL_CHANGE_INDICATION = 0x1A,
ACL_QUERY = 0x1B,
ACL_QUERY_RSP = 0x9B,
- REQUEST_DEBUG_STATS = 0x1C,
- REQUEST_DEBUG_STATS_RSP = 0x9C,
QUERY_MAP = 0x1D,
QUERY_MAP_RSP = 0x9D,
REQUEST_MAP = 0x1E,
@@ -880,7 +869,6 @@ struct ibmvnic_tx_buff {
int index;
int pool_index;
bool last_frag;
- bool used_bounce;
union sub_crq indir_arr[6];
u8 hdr_data[140];
dma_addr_t indir_dma;
@@ -925,18 +913,6 @@ struct ibmvnic_error_buff {
__be32 error_id;
};
-struct ibmvnic_fw_comp_internal {
- struct ibmvnic_adapter *adapter;
- int num;
- struct debugfs_blob_wrapper desc_blob;
- int paused;
-};
-
-struct ibmvnic_inflight_cmd {
- union ibmvnic_crq crq;
- struct list_head list;
-};
-
struct ibmvnic_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
@@ -948,12 +924,8 @@ struct ibmvnic_adapter {
dma_addr_t ip_offload_ctrl_tok;
bool migrated;
u32 msg_enable;
- void *bounce_buffer;
- int bounce_buffer_size;
- dma_addr_t bounce_buffer_dma;
/* Statistics */
- struct net_device_stats net_stats;
struct ibmvnic_statistics stats;
dma_addr_t stats_token;
struct completion stats_done;
@@ -992,26 +964,12 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
bool closing;
struct completion init_done;
+ int init_done_rc;
struct list_head errors;
spinlock_t error_list_lock;
- /* debugfs */
- struct dentry *debugfs_dir;
- struct dentry *debugfs_dump;
struct completion fw_done;
- char *dump_data;
- dma_addr_t dump_data_token;
- int dump_data_size;
- int ras_comp_num;
- struct ibmvnic_fw_component *ras_comps;
- struct ibmvnic_fw_comp_internal *ras_comp_int;
- dma_addr_t ras_comps_tok;
- struct dentry *ras_comps_ent;
-
- /* in-flight commands that allocate and/or map memory*/
- struct list_head inflight;
- spinlock_t inflight_lock;
/* partner capabilities */
u64 min_tx_queues;
@@ -1037,6 +995,7 @@ struct ibmvnic_adapter {
u64 req_mtu;
u64 max_multicast_filters;
u64 vlan_header_insertion;
+ u64 rx_vlan_header_insertion;
u64 max_tx_sg_entries;
u64 rx_sg_supported;
u64 rx_sg_requested;
@@ -1052,4 +1011,5 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_xport;
struct tasklet_struct tasklet;
bool failover;
+ bool is_closed;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1349b45f014d..1542a2158e96 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -235,17 +235,6 @@ config I40E_DCB
If unsure, say N.
-config I40E_FCOE
- bool "Fibre Channel over Ethernet (FCoE)"
- default n
- depends on I40E && DCB && FCOE
- ---help---
- Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
- in the driver. This will create new netdev for exclusive FCoE
- use with XL710 FCoE offloads enabled.
-
- If unsure, say N.
-
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 975eeb885ca2..ec8aa4562cc9 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -103,104 +103,104 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 supported, advertising;
if (hw->media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->autoneg_advertised;
+ advertising |= hw->autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy_addr;
-
- if (hw->mac_type == e1000_82543)
- ecmd->transceiver = XCVR_EXTERNAL;
- else
- ecmd->transceiver = XCVR_INTERNAL;
-
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
- ecmd->port = PORT_FIBRE;
-
- if (hw->mac_type >= e1000_82545)
- ecmd->transceiver = XCVR_INTERNAL;
- else
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
if (er32(STATUS) & E1000_STATUS_LU) {
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ cmd->base.speed = adapter->link_speed;
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
* and HALF_DUPLEX != DUPLEX_HALF
*/
if (adapter->link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) ||
hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* MDI-X => 1; MDI => 0 */
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+ cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
ETH_TP_MDI_X : ETH_TP_MDI);
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->mdix;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
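
The conversion helpers used above bridge the old fixed u32 SUPPORTED_*/ADVERTISED_* masks into ethtool's newer arbitrarily wide link-mode bitmaps; the low 32 mode bits map one-to-one onto the legacy bit positions. A simplified model of what ethtool_convert_legacy_u32_to_link_mode() amounts to (word count and bit choices are illustrative):

#include <stdint.h>
#include <stdio.h>

#define LINK_MODE_WORDS 3	/* room for more than 64 modes */

static void legacy_to_link_mode(unsigned long *dst, uint32_t legacy)
{
	for (int i = 0; i < LINK_MODE_WORDS; i++)
		dst[i] = 0;
	dst[0] = legacy;	/* legacy bits occupy the first 32 modes */
}

int main(void)
{
	unsigned long supported[LINK_MODE_WORDS];
	/* e.g. SUPPORTED_1000baseT_Full (1<<5) | SUPPORTED_Autoneg (1<<6) */
	uint32_t legacy = (1u << 5) | (1u << 6);

	legacy_to_link_mode(supported, legacy);
	printf("word0 = %#lx\n", supported[0]);
	return 0;
}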
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
@@ -209,32 +209,31 @@ static int e1000_set_settings(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->autoneg = 1;
if (hw->media_type == e1000_media_type_fiber)
hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
else
- hw->autoneg_advertised = ecmd->advertising |
+ hw->autoneg_advertised = advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
- ecmd->advertising = hw->autoneg_advertised;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->mdix = AUTO_ALL_MODES;
else
- hw->mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -1875,8 +1874,6 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
@@ -1901,6 +1898,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = e1000_get_link_ksettings,
+ .set_link_ksettings = e1000_set_link_ksettings,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 93fc6c67306b..bd8b05fe8258 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -131,7 +131,6 @@ static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
@@ -846,7 +845,6 @@ static const struct net_device_ops e1000_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats = e1000_get_stats,
.ndo_set_rx_mode = e1000_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_tx_timeout = e1000_tx_timeout,
@@ -3530,19 +3528,6 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the watchdog.
- **/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
-{
- /* only return the current stats */
- return &netdev->stats;
-}
-
-/**
* e1000_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index a29b12e80855..c7c994eb410e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -135,7 +135,8 @@ enum e1000_boards {
board_pchlan,
board_pch2lan,
board_pch_lpt,
- board_pch_spt
+ board_pch_spt,
+ board_pch_cnp
};
struct e1000_ps_page {
@@ -378,18 +379,22 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
 * bits to count nanoseconds leaving the rest for fractional nanoseconds.
*/
-#define INCVALUE_96MHz 125
-#define INCVALUE_SHIFT_96MHz 17
-#define INCPERIOD_SHIFT_96MHz 2
-#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+#define INCVALUE_96MHZ 125
+#define INCVALUE_SHIFT_96MHZ 17
+#define INCPERIOD_SHIFT_96MHZ 2
+#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
-#define INCVALUE_25MHz 40
-#define INCVALUE_SHIFT_25MHz 18
-#define INCPERIOD_25MHz 1
+#define INCVALUE_25MHZ 40
+#define INCVALUE_SHIFT_25MHZ 18
+#define INCPERIOD_25MHZ 1
-#define INCVALUE_24MHz 125
-#define INCVALUE_SHIFT_24MHz 14
-#define INCPERIOD_24MHz 3
+#define INCVALUE_24MHZ 125
+#define INCVALUE_SHIFT_24MHZ 14
+#define INCPERIOD_24MHZ 3
+
+#define INCVALUE_38400KHZ 26
+#define INCVALUE_SHIFT_38400KHZ 19
+#define INCPERIOD_38400KHZ 1
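
The renamed constants encode how SYSTIM advances: INCVALUE units (fractional where a shift is defined) every INCPERIOD clock cycles. The two shift-free cases can be sanity-checked directly, since one 25 MHz cycle is 40 ns and three 24 MHz cycles are 125 ns:

#include <stdio.h>

int main(void)
{
	printf("25 MHz: %g ns per increment (INCVALUE 40, INCPERIOD 1)\n",
	       1e9 * 1 / 25e6);			/* 40 ns */
	printf("24 MHz: %g ns per increment (INCVALUE 125, INCPERIOD 3)\n",
	       1e9 * 3 / 24e6);			/* 125 ns */
	return 0;
}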
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
@@ -515,6 +520,7 @@ extern const struct e1000_info e1000_pch_info;
extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
+extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 7aff68a4a4df..e23dbd9190d6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -117,55 +117,52 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 speed;
+ u32 speed, supported, advertising;
if (hw->phy.media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
if (hw->phy.type == e1000_phy_ife)
- ecmd->supported &= ~SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_TP;
+ supported &= ~SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
+ advertising |= hw->phy.autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
-
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
speed = adapter->link_speed;
- ecmd->duplex = adapter->link_duplex - 1;
+ cmd->base.duplex = adapter->link_duplex - 1;
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
@@ -179,30 +176,36 @@ static int e1000_get_settings(struct net_device *netdev,
speed = SPEED_10;
if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
}
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+ cmd->base.speed = speed;
+ cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ?
+ ETH_TP_MDI_X : ETH_TP_MDI;
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
if (hw->phy.media_type != e1000_media_type_copper)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
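The two conversion calls at the end are what bridge the old u32 masks to the new ksettings API; conceptually the helper is just a bit-for-bit copy into the first word of the link-mode bitmap. A simplified model (not the kernel's actual definition, though it behaves the same way for these legacy bits):

/* The legacy u32 mask occupies the first 32 link-mode bits. */
static void legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32)
{
	bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dst[0] = legacy_u32;
}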
@@ -262,12 +265,16 @@ err_inval:
return -EINVAL;
}
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int ret_val = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
pm_runtime_get_sync(netdev->dev.parent);
@@ -285,14 +292,14 @@ static int e1000_set_settings(struct net_device *netdev,
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = -EOPNOTSUPP;
goto out;
}
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
ret_val = -EINVAL;
goto out;
@@ -302,35 +309,35 @@ static int e1000_set_settings(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_TP | ADVERTISED_Autoneg;
- ecmd->advertising = hw->phy.autoneg_advertised;
+ advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
ret_val = -EINVAL;
goto out;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->phy.mdix = AUTO_ALL_MODES;
else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -904,19 +911,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ /* fall through */
+ case e1000_pch_cnp:
mask |= BIT(18);
break;
default:
break;
}
- if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
+ if (mac->type >= e1000_pch_lpt)
wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- if ((mac->type == e1000_pch_lpt) ||
- (mac->type == e1000_pch_spt)) {
+ if (mac->type >= e1000_pch_lpt) {
/* Cannot test write-protected SHRAL[n] registers */
if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
continue;
@@ -1525,7 +1533,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl, fext_nvm11, tarc0;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -1569,6 +1577,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch_spt:
+ case e1000_pch_cnp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -2313,8 +2322,6 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
@@ -2342,6 +2349,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
+ .get_link_ksettings = e1000_get_link_ksettings,
+ .set_link_ksettings = e1000_set_link_ksettings,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 4e733bf1a38e..66bd5060a65b 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -96,6 +96,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
+#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD
+#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE
+#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB
+#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC
#define E1000_REVISION_4 4
@@ -118,6 +122,7 @@ enum e1000_mac_type {
e1000_pch2lan,
e1000_pch_lpt,
e1000_pch_spt,
+ e1000_pch_cnp,
};
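Note that the many "hw->mac.type >= e1000_pch_lpt" checks introduced by this patch are only valid because this enum lists MAC generations in ascending order, so e1000_pch_cnp must stay at the end and any future PCH must be appended after it. In miniature, with hypothetical names:

/* Range check that depends on enum declaration order. */
enum mac_gen { GEN_PCH2LAN, GEN_PCH_LPT, GEN_PCH_SPT, GEN_PCH_CNP };

static int lpt_or_newer(enum mac_gen g)
{
	return g >= GEN_PCH_LPT; /* covers LPT, SPT and CNP in one test */
}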
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f3aaca743ea3..68ea8b4555ab 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -237,7 +237,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
/* Unforce SMBus mode in PHY */
@@ -333,6 +333,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -474,6 +475,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -607,7 +609,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_flash_sw;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
/* in SPT, gfpreg doesn't exist. NVM size is taken from the
* STRAP register. This is because in SPT the GbE Flash region
* is no longer accessed through the flash registers. Instead,
@@ -715,6 +717,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -732,7 +735,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
break;
}
- if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
+ if (mac->type >= e1000_pch_lpt) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
@@ -1399,9 +1402,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if (((hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) && link) {
+ if ((hw->mac.type >= e1000_pch2lan) && link) {
u16 speed, duplex;
e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
@@ -1412,7 +1413,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
tipg_reg |= 0xFF;
/* Reduce Rx latency in analog PHY */
emi_val = 0;
- } else if (hw->mac.type == e1000_pch_spt &&
+ } else if (hw->mac.type >= e1000_pch_spt &&
duplex == FULL_DUPLEX && speed != SPEED_1000) {
tipg_reg |= 0xC;
emi_val = 1;
@@ -1435,8 +1436,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
emi_addr = I217_RX_CONFIG;
ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
- if (hw->mac.type == e1000_pch_lpt ||
- hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u16 phy_reg;
e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
@@ -1452,7 +1452,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
u16 data;
u16 ptr_gap;
@@ -1502,7 +1502,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* on power up.
* Set the Beacon Duration for I217 to 8 usec
*/
- if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 mac_reg;
mac_reg = er32(FEXTNVM4);
@@ -1520,8 +1520,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
}
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
/* Set platform power management values for
* Latency Tolerance Reporting (LTR)
*/
@@ -1533,15 +1532,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
- /* FEXTNVM6 K1-off workaround */
- if (hw->mac.type == e1000_pch_spt) {
- u32 pcieanacfg = er32(PCIEANACFG);
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 fextnvm6 = er32(FEXTNVM6);
- if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
- fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
- else
- fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ if (hw->mac.type == e1000_pch_spt) {
+ /* FEXTNVM6 K1-off workaround - for SPT only */
+ u32 pcieanacfg = er32(PCIEANACFG);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ }
ew32(FEXTNVM6, fextnvm6);
}
@@ -1640,6 +1642,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2091,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3125,6 +3129,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
switch (hw->mac.type) {
case e1000_pch_spt:
+ case e1000_pch_cnp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -3380,7 +3385,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
else
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
@@ -3399,7 +3404,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
else
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
@@ -3423,7 +3428,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS,
hsfsts.regval & 0xFFFF);
else
@@ -3450,13 +3455,13 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
u32 i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -3527,7 +3532,7 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* In SPT, only 32-bit access is supported,
* so this function should not be called.
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
return -E1000_ERR_NVM;
else
ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
@@ -3634,8 +3639,7 @@ static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val = -E1000_ERR_NVM;
u8 count = 0;
- if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
- hw->mac.type != e1000_pch_spt)
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
return -E1000_ERR_NVM;
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
@@ -4068,6 +4072,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -4153,7 +4158,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u8 count = 0;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
} else {
@@ -4173,7 +4178,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* In SPT, this register is in LAN memory space, not
* flash. Therefore, only 32-bit access is supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
@@ -4185,7 +4190,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
* not flash. Therefore, only 32-bit access is
* supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -4243,7 +4248,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u8 count = 0;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
}
@@ -4259,7 +4264,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
/* In SPT, this register is in LAN memory space, not
* flash. Therefore, only 32-bit access is supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
>> 16;
else
@@ -4272,7 +4277,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
* not flash. Therefore, only 32-bit access is
* supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -4464,14 +4469,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval =
er32flash(ICH_FLASH_HSFSTS) >> 16;
else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS,
hsflctl.regval << 16);
else
@@ -4894,8 +4899,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(RFCTL, reg);
/* Enable ECC on Lynxpoint */
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
reg = er32(PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
ew32(PBECCSTS, reg);
@@ -5299,7 +5303,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(device_id == E1000_DEV_ID_PCH_I218_LM3) ||
(device_id == E1000_DEV_ID_PCH_I218_V3) ||
- (hw->mac.type == e1000_pch_spt)) {
+ (hw->mac.type >= e1000_pch_spt)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
@@ -5865,7 +5869,8 @@ const struct e1000_info e1000_pch2_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
- | FLAG2_HAS_EEE,
+ | FLAG2_HAS_EEE
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
@@ -5914,3 +5919,23 @@ const struct e1000_info e1000_pch_spt_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_cnp_info = {
+ .mac = e1000_pch_cnp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e9af89ad039c..b3679728caac 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -71,6 +71,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch2lan] = &e1000_pch2_info,
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
+ [board_pch_cnp] = &e1000_pch_cnp_info,
};
struct e1000_reg_info {
@@ -1791,8 +1792,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt))) {
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -1872,8 +1872,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt))) {
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -2241,8 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
- } else if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ } else if (hw->mac.type >= e1000_pch_lpt) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
@@ -3000,8 +2998,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
hw->mac.ops.config_collision_dist(hw);
- /* SPT Si errata workaround to avoid data corruption */
- if (hw->mac.type == e1000_pch_spt) {
+ /* SPT and CNP Si errata workaround to avoid data corruption */
+ if (hw->mac.type >= e1000_pch_spt) {
u32 reg_val;
reg_val = er32(IOSFPC);
@@ -3497,8 +3495,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
/* Make sure clock is enabled on I217/I218/I219 before checking
* the frequency
*/
- if (((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) &&
+ if ((hw->mac.type >= e1000_pch_lpt) &&
!(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
@@ -3511,37 +3508,58 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHZ;
+ incvalue = INCVALUE_96MHZ;
+ shift = INCVALUE_SHIFT_96MHZ;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
+ break;
case e1000_pch_lpt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
- incperiod = INCPERIOD_96MHz;
- incvalue = INCVALUE_96MHz;
- shift = INCVALUE_SHIFT_96MHz;
- adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ incperiod = INCPERIOD_96MHZ;
+ incvalue = INCVALUE_96MHZ;
+ shift = INCVALUE_SHIFT_96MHZ;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
} else {
/* Stable 25MHz frequency */
- incperiod = INCPERIOD_25MHz;
- incvalue = INCVALUE_25MHz;
- shift = INCVALUE_SHIFT_25MHz;
+ incperiod = INCPERIOD_25MHZ;
+ incvalue = INCVALUE_25MHZ;
+ shift = INCVALUE_SHIFT_25MHZ;
adapter->cc.shift = shift;
}
break;
case e1000_pch_spt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
- incperiod = INCPERIOD_24MHz;
- incvalue = INCVALUE_24MHz;
- shift = INCVALUE_SHIFT_24MHz;
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
adapter->cc.shift = shift;
break;
}
return -EINVAL;
+ case e1000_pch_cnp:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
+ adapter->cc.shift = shift;
+ } else {
+ /* Stable 38400KHz frequency */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ }
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
- incperiod = INCPERIOD_25MHz;
- incvalue = INCVALUE_25MHz;
- shift = INCVALUE_SHIFT_25MHz;
+ incperiod = INCPERIOD_25MHZ;
+ incvalue = INCVALUE_25MHZ;
+ shift = INCVALUE_SHIFT_25MHZ;
adapter->cc.shift = shift;
break;
default:
@@ -4032,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
fc->refresh_time = 0x0400;
if (adapter->netdev->mtu <= ETH_DATA_LEN) {
@@ -4076,7 +4095,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
}
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
e1000_flush_desc_rings(adapter);
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -4151,7 +4170,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
- if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+ if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) {
u32 reg;
/* Fextnvm7 @ 0xe4[2] = 1 */
@@ -4285,7 +4304,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
if (!pci_channel_offline(adapter->pdev)) {
if (reset)
e1000e_reset(adapter);
- else if (hw->mac.type == e1000_pch_spt)
+ else if (hw->mac.type >= e1000_pch_spt)
e1000_flush_desc_rings(adapter);
}
e1000_clean_tx_ring(adapter->tx_ring);
@@ -4973,8 +4992,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgpdc += er32(MGTPDC);
/* Correctable ECC Errors */
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -6348,8 +6366,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- } else if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ } else if (hw->mac.type >= e1000_pch_lpt) {
if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
@@ -7508,6 +7525,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
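Each entry's last column lands in pci_device_id::driver_data, which the probe path uses to index e1000_info_tbl, so the four new device IDs resolve to the e1000_pch_cnp_info added above. A simplified sketch of that existing lookup (the helper name is illustrative):

/* driver_data carries the board_* index from e1000_pci_tbl. */
static const struct e1000_info *e1000_board_info(const struct pci_device_id *ent)
{
	return e1000_info_tbl[ent->driver_data];
}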
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 34cc3be0df8e..b366885487a8 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -301,8 +301,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- if (((hw->mac.type != e1000_pch_lpt) &&
- (hw->mac.type != e1000_pch_spt)) ||
+ case e1000_pch_cnp:
+ if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
break;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 52b979443cde..689c413b7782 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -65,14 +65,16 @@ enum fm10k_ring_state_t {
__FM10K_TX_DETECT_HANG,
__FM10K_HANG_CHECK_ARMED,
__FM10K_TX_XPS_INIT_DONE,
+ /* This must be last and is used to calculate BITMAP size */
+ __FM10K_TX_STATE_SIZE__,
};
#define check_for_tx_hang(ring) \
- test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+ test_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
#define set_check_for_tx_hang(ring) \
- set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+ set_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
#define clear_check_for_tx_hang(ring) \
- clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+ clear_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
struct fm10k_tx_buffer {
struct fm10k_tx_desc *next_to_watch;
@@ -126,7 +128,7 @@ struct fm10k_ring {
struct fm10k_rx_buffer *rx_buffer;
};
u32 __iomem *tail;
- unsigned long state;
+ DECLARE_BITMAP(state, __FM10K_TX_STATE_SIZE__);
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
@@ -249,18 +251,46 @@ struct fm10k_udp_port {
/* one work queue for entire driver */
extern struct workqueue_struct *fm10k_workqueue;
+/* The following enumeration contains flags which indicate or enable modified
+ * driver behaviors. To avoid race conditions, the flags are stored in
+ * a BITMAP in the fm10k_intfc structure. The BITMAP should be accessed using
+ * atomic *_bit() operations.
+ */
+enum fm10k_flags_t {
+ FM10K_FLAG_RESET_REQUESTED,
+ FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ FM10K_FLAG_SWPRI_CONFIG,
+ /* __FM10K_FLAGS_SIZE__ is used to calculate the size of
+ * interface->flags and must be the last value in this
+ * enumeration.
+ */
+ __FM10K_FLAGS_SIZE__
+};
+
+enum fm10k_state_t {
+ __FM10K_RESETTING,
+ __FM10K_DOWN,
+ __FM10K_SERVICE_SCHED,
+ __FM10K_SERVICE_REQUEST,
+ __FM10K_SERVICE_DISABLE,
+ __FM10K_MBX_LOCK,
+ __FM10K_LINK_DOWN,
+ __FM10K_UPDATING_STATS,
+ /* This value must be last and determines the BITMAP size */
+ __FM10K_STATE_SIZE__,
+};
+
struct fm10k_intfc {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */
struct pci_dev *pdev;
- unsigned long state;
+ DECLARE_BITMAP(state, __FM10K_STATE_SIZE__);
+
+ /* Access flag values using atomic *_bit() operations */
+ DECLARE_BITMAP(flags, __FM10K_FLAGS_SIZE__);
- u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3))
int xcast_mode;
/* Tx fast path data */
@@ -352,22 +382,12 @@ struct fm10k_intfc {
u16 vid;
};
-enum fm10k_state_t {
- __FM10K_RESETTING,
- __FM10K_DOWN,
- __FM10K_SERVICE_SCHED,
- __FM10K_SERVICE_DISABLE,
- __FM10K_MBX_LOCK,
- __FM10K_LINK_DOWN,
- __FM10K_UPDATING_STATS,
-};
-
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
{
/* busy loop if we cannot obtain the lock as some calls
* such as ndo_set_rx_mode may be made in atomic context
*/
- while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state))
+ while (test_and_set_bit(__FM10K_MBX_LOCK, interface->state))
udelay(20);
}
@@ -375,12 +395,12 @@ static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
{
/* flush memory to make sure state is correct */
smp_mb__before_atomic();
- clear_bit(__FM10K_MBX_LOCK, &interface->state);
+ clear_bit(__FM10K_MBX_LOCK, interface->state);
}
static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
{
- return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
+ return !test_and_set_bit(__FM10K_MBX_LOCK, interface->state);
}
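The dropped "&" in these call sites is not cosmetic: state used to be a scalar unsigned long whose address had to be taken, whereas DECLARE_BITMAP declares an array that already decays to the unsigned long * the bitops expect. A self-contained sketch (the macro mirrors the kernel's definition):

#include <limits.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

struct old_intfc { unsigned long state; };       /* scalar: pass &o->state */
struct new_intfc { DECLARE_BITMAP(state, 64); }; /* array: pass n->state */

static void addresses(struct old_intfc *o, struct new_intfc *n,
		      unsigned long **old_p, unsigned long **new_p)
{
	*old_p = &o->state; /* address-of needed for the scalar */
	*new_p = n->state;  /* array name decays to unsigned long * */
}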
/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 0c84fef750f4..c7234f35f8ff 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -562,7 +562,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
return 0;
}
- while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
+ while (test_and_set_bit(__FM10K_RESETTING, interface->state))
usleep_range(1000, 2000);
if (!netif_running(interface->netdev)) {
@@ -648,7 +648,7 @@ err_setup:
fm10k_up(interface);
vfree(temp_ring);
clear_reset:
- clear_bit(__FM10K_RESETTING, &interface->state);
+ clear_bit(__FM10K_RESETTING, interface->state);
return err;
}
@@ -716,7 +716,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
case UDP_V4_FLOW:
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
case SCTP_V4_FLOW:
@@ -732,7 +733,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
@@ -764,12 +766,13 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \
- FM10K_FLAG_RSS_FIELD_IPV6_UDP)
static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
struct ethtool_rxnfc *nfc)
{
- u32 flags = interface->flags;
+ int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags);
+ int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags);
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -793,10 +796,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP;
+ clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP;
+ set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags);
break;
default:
return -EINVAL;
@@ -808,10 +813,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP;
+ clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP;
+ set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags);
break;
default:
return -EINVAL;
@@ -835,28 +842,41 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return -EINVAL;
}
- /* if we changed something we need to update flags */
- if (flags != interface->flags) {
+ /* If something changed we need to update the MRQC register. Note that
+ * test_bit() is guaranteed to return strictly 0 or 1, so testing for
+ * equality is safe.
+ */
+ if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags)) ||
+ (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags))) {
struct fm10k_hw *hw = &interface->hw;
+ bool warn = false;
u32 mrqc;
- if ((flags & UDP_RSS_FLAGS) &&
- !(interface->flags & UDP_RSS_FLAGS))
- netif_warn(interface, drv, interface->netdev,
- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
-
- interface->flags = flags;
-
/* Perform hash on these packet types */
mrqc = FM10K_MRQC_IPV4 |
FM10K_MRQC_TCP_IPV4 |
FM10K_MRQC_IPV6 |
FM10K_MRQC_TCP_IPV6;
- if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags)) {
mrqc |= FM10K_MRQC_UDP_IPV4;
- if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+ warn = true;
+ }
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags)) {
mrqc |= FM10K_MRQC_UDP_IPV6;
+ warn = true;
+ }
+
+ /* If we enable UDP RSS, display a warning that this may cause
+ * fragmented UDP packets to arrive out of order.
+ */
+ if (warn)
+ netif_warn(interface, drv, interface->netdev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
}
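Because the flags now live in an atomic bitmap, "did the user change anything" is answered by snapshotting the bits before the switch and comparing afterwards, as above. The pattern in miniature (flag index and hardware hook are illustrative stand-ins):

enum { RSS_IPV4_UDP };			/* illustrative flag index */
static void reprogram_mrqc(void);	/* stands in for the MRQC write */

/* Snapshot-and-compare over an atomic flag bitmap. */
static void update_udp_rss(unsigned long *flags, bool enable)
{
	bool was_set = test_bit(RSS_IPV4_UDP, flags);

	if (enable)
		set_bit(RSS_IPV4_UDP, flags);
	else
		clear_bit(RSS_IPV4_UDP, flags);

	/* test_bit() returns strictly 0 or 1, so inequality is a safe test */
	if (was_set != test_bit(RSS_IPV4_UDP, flags))
		reprogram_mrqc();
}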
@@ -939,7 +959,7 @@ static void fm10k_self_test(struct net_device *dev,
memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
- if (FM10K_REMOVED(hw)) {
+ if (FM10K_REMOVED(hw->hw_addr)) {
netif_err(interface, drv, dev,
"Interface removed - test blocked\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5bb233a9614c..9dffaba85ae6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,7 +34,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright (c) 2013 - 2016 Intel Corporation.";
+ "Copyright(c) 2013 - 2017 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -1175,13 +1175,13 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
/* update completed stats and continue */
tx_ring->tx_stats.tx_done_old = tx_done;
/* reset the countdown */
- clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
+ clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
return false;
}
/* make sure it is true for two checks in a row */
- return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
+ return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
}
/**
@@ -1191,9 +1191,9 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
/* Do the reset outside of interrupt context */
- if (!test_bit(__FM10K_DOWN, &interface->state)) {
+ if (!test_bit(__FM10K_DOWN, interface->state)) {
interface->tx_timeout_count++;
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
fm10k_service_event_schedule(interface);
}
}
@@ -1214,7 +1214,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
- if (test_bit(__FM10K_DOWN, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state))
return true;
tx_buffer = &tx_ring->tx_buffer[i];
@@ -1344,7 +1344,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__FM10K_DOWN, &interface->state)) {
+ !test_bit(__FM10K_DOWN, interface->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 01db688cf539..24f2f6f86f5a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -737,6 +737,23 @@ static void fm10k_tx_timeout(struct net_device *netdev)
}
}
+/**
+ * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
+ * @interface: board private structure
+ *
+ * This function checks if the PF interface's mailbox is ready before queueing
+ * mailbox messages for transmission. This will prevent filling the TX mailbox
+ * queue when the receiver is not ready. VF interfaces are exempt from this
+ * check since it would otherwise block all PF-VF mailbox messages from being
+ * sent from the VF to the PF at initialization.
+ **/
+static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
+{
+ struct fm10k_hw *hw = &interface->hw;
+
+ return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
+}
+
static int fm10k_uc_vlan_unsync(struct net_device *netdev,
const unsigned char *uc_addr)
{
@@ -745,12 +762,15 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err;
+ int err = -EHOSTDOWN;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
- err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0);
+ if (fm10k_host_mbx_ready(interface))
+ err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr,
+ vid, set, 0);
+
if (err)
return err;
@@ -766,12 +786,14 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err;
+ int err = -EHOSTDOWN;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
- err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
+ if (fm10k_host_mbx_ready(interface))
+ err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
+
if (err)
return err;
@@ -822,7 +844,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
/* Do not throw an error if the interface is down. We will sync once
* we come up
*/
- if (test_bit(__FM10K_DOWN, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state))
return 0;
fm10k_mbx_lock(interface);
@@ -834,9 +856,13 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
goto err_out;
}
- /* update our base MAC address */
- err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
- vid, set, 0);
+ /* update our base MAC address if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ err = hw->mac.ops.update_uc_addr(hw, interface->glort,
+ hw->mac.addr, vid, set, 0);
+ else
+ err = -EHOSTDOWN;
+
if (err)
goto err_out;
@@ -907,12 +933,15 @@ static int __fm10k_uc_sync(struct net_device *dev,
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- /* update table with current entries */
+ /* update table with current entries if host's mailbox is ready */
+ if (!fm10k_host_mbx_ready(interface))
+ return -EHOSTDOWN;
+
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_uc_addr(hw, glort, addr,
- vid, sync, 0);
+ vid, sync, 0);
if (err)
return err;
}
@@ -970,7 +999,10 @@ static int __fm10k_mc_sync(struct net_device *dev,
struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
- /* update table with current entries */
+ /* update table with current entries if host's mailbox is ready */
+ if (!fm10k_host_mbx_ready(interface))
+ return 0;
+
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
@@ -1018,8 +1050,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_clear_unused_vlans(interface);
- /* update xcast mode */
- hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);
+ /* update xcast mode if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_xcast_mode(hw, interface->glort,
+ xcast_mode);
/* record updated xcast mode state */
interface->xcast_mode = xcast_mode;
@@ -1054,8 +1088,10 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_mbx_lock(interface);
- /* Enable logical port */
- hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true);
+ /* Enable logical port if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_lport_state(hw, glort,
+ interface->glort_count, true);
/* update VLAN table */
hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
@@ -1069,12 +1105,18 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_vlan(hw, vid, 0, true);
- hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
- vid, true, 0);
+
+ /* Update unicast entries if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
+ vid, true, 0);
}
- /* update xcast mode before synchronizing addresses */
- hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
+ /* update xcast mode before synchronizing addresses if host's mailbox
+ * is ready
+ */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
@@ -1096,9 +1138,12 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
fm10k_mbx_lock(interface);
- /* clear the logical port state on lower device */
- hw->mac.ops.update_lport_state(hw, interface->glort,
- interface->glort_count, false);
+ /* clear the logical port state on lower device if host's mailbox is
+ * ready
+ */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_lport_state(hw, interface->glort,
+ interface->glort_count, false);
fm10k_mbx_unlock(interface);
@@ -1115,8 +1160,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
* @netdev: network interface device structure
* @stats: storage space for 64bit statistics
*
- * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
- * function replaces fm10k_get_stats for kernels which support it.
+ * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit
+ * architectures.
*/
static void fm10k_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
@@ -1207,7 +1252,7 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc)
goto err_open;
/* flag to indicate SWPRI has yet to be updated */
- interface->flags |= FM10K_FLAG_SWPRI_CONFIG;
+ set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
return 0;
err_open:
@@ -1226,7 +1271,9 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return fm10k_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return fm10k_setup_tc(dev, tc->mqprio->num_tc);
}
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
@@ -1317,8 +1364,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
fm10k_mbx_lock(interface);
glort = l2_accel->dglort + 1 + i;
- hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI);
- hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0);
+
+ if (fm10k_host_mbx_ready(interface)) {
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_MULTI);
+ hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
+ 0, true, 0);
+ }
fm10k_mbx_unlock(interface);
@@ -1352,8 +1404,13 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
fm10k_mbx_lock(interface);
glort = l2_accel->dglort + 1 + i;
- hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE);
- hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0);
+
+ if (fm10k_host_mbx_ready(interface)) {
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_NONE);
+ hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
+ 0, false, 0);
+ }
fm10k_mbx_unlock(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e372a5823480..3e26d27ad213 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/aer.h>
#include "fm10k.h"
@@ -92,18 +93,29 @@ static int fm10k_hw_ready(struct fm10k_intfc *interface)
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
- if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
- !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
+ if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
+ !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
+ clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
queue_work(fm10k_workqueue, &interface->service_task);
+ } else {
+ set_bit(__FM10K_SERVICE_REQUEST, interface->state);
+ }
}
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
- WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+ WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
- clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
+ clear_bit(__FM10K_SERVICE_SCHED, interface->state);
+
+ /* If a service event was requested since we started, immediately
+ * re-schedule now. This ensures we don't drop a request until the
+ * next timer event.
+ */
+ if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
+ fm10k_service_event_schedule(interface);
}
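The new __FM10K_SERVICE_REQUEST bit closes a lost-wakeup window: an event arriving while the service task was already scheduled used to be dropped until the next timer tick. Reduced to its essentials (hypothetical worker and workqueue, same kernel bitops):

/* Schedule-or-remember, with the remembered request replayed on completion. */
static void event_schedule(struct worker *w)
{
	if (!test_bit(WORK_DISABLED, w->state) &&
	    !test_and_set_bit(WORK_SCHED, w->state)) {
		clear_bit(WORK_REQUEST, w->state); /* this run covers it */
		queue_work(wq, &w->task);
	} else {
		set_bit(WORK_REQUEST, w->state);   /* remember for later */
	}
}

static void event_complete(struct worker *w)
{
	smp_mb__before_atomic();
	clear_bit(WORK_SCHED, w->state);
	if (test_bit(WORK_REQUEST, w->state))	/* arrived while running */
		event_schedule(w);
}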
/**
@@ -136,7 +148,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
if (~value) {
interface->hw.hw_addr = interface->uc_addr;
netif_device_attach(netdev);
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
netdev_warn(netdev, "PCIe link restored, device now attached\n");
return;
}
@@ -158,7 +170,7 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
- while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
+ while (test_and_set_bit(__FM10K_RESETTING, interface->state))
usleep_range(1000, 2000);
rtnl_lock();
@@ -241,7 +253,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
rtnl_unlock();
- clear_bit(__FM10K_RESETTING, &interface->state);
+ clear_bit(__FM10K_RESETTING, interface->state);
return err;
err_open:
@@ -253,7 +265,7 @@ reinit_err:
rtnl_unlock();
- clear_bit(__FM10K_RESETTING, &interface->state);
+ clear_bit(__FM10K_RESETTING, interface->state);
return err;
}
@@ -272,11 +284,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
- if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
+ if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
+ interface->flags))
return;
- interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;
-
netdev_err(interface->netdev, "Reset interface\n");
fm10k_reinit(interface);
@@ -295,7 +306,7 @@ static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
int i;
/* clear flag indicating update is needed */
- interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;
+ clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
/* these registers are only available on the PF */
if (hw->mac.type != fm10k_mac_pf)
@@ -316,14 +327,14 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
s32 err;
- if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
+ if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
interface->host_ready = false;
if (time_is_after_jiffies(interface->link_down_event))
return;
- clear_bit(__FM10K_LINK_DOWN, &interface->state);
+ clear_bit(__FM10K_LINK_DOWN, interface->state);
}
- if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
+ if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
if (rtnl_trylock()) {
fm10k_configure_swpri_map(interface);
rtnl_unlock();
@@ -335,7 +346,7 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
if (err && time_is_before_jiffies(interface->last_reset))
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* free the lock */
fm10k_mbx_unlock(interface);
@@ -411,7 +422,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
int i;
/* ensure only one thread updates stats at a time */
- if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
return;
/* do not allow stats update via service task for next second */
@@ -492,7 +503,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
- clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+ clear_bit(__FM10K_UPDATING_STATS, interface->state);
}
/**
@@ -522,7 +533,7 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
* controller to flush Tx.
*/
if (some_tx_pending)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}
/**
@@ -532,8 +543,8 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
/* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state) ||
+ test_bit(__FM10K_RESETTING, interface->state))
return;
if (interface->host_ready)
@@ -563,8 +574,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
int i;
/* If we're down or resetting, just bail */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state) ||
+ test_bit(__FM10K_RESETTING, interface->state))
return;
/* rate limit tx hang checks to only once every 2 seconds */
@@ -663,7 +674,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
/* Initialize XPS */
- if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) &&
+ if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
ring->q_vector)
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -743,6 +754,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* disable queue to avoid issues while updating state */
rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
rxqctl &= ~FM10K_RXQCTL_ENABLE;
+ fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
fm10k_write_flush(hw);
/* possible poll here to verify ring resources have been cleaned */
@@ -863,9 +875,9 @@ static void fm10k_configure_dglort(struct fm10k_intfc *interface)
FM10K_MRQC_IPV6 |
FM10K_MRQC_TCP_IPV6;
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
mrqc |= FM10K_MRQC_UDP_IPV4;
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
mrqc |= FM10K_MRQC_UDP_IPV6;
fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
@@ -980,7 +992,7 @@ void fm10k_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state))
return;
for (i = 0; i < interface->num_q_vectors; i++)
@@ -1167,13 +1179,13 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
}
if (err == FM10K_ERR_RESET_REQUESTED)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
interface->link_down_event = jiffies + (4 * HZ);
- set_bit(__FM10K_LINK_DOWN, &interface->state);
+ set_bit(__FM10K_LINK_DOWN, interface->state);
/* reset dglort_map back to no config */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
@@ -1246,12 +1258,12 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
/* MAC was changed so we need reset */
if (is_valid_ether_addr(hw->mac.perm_addr) &&
!ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* VLAN override was changed, or default VLAN changed */
if ((vlan_override != hw->mac.vlan_override) ||
(default_vid != hw->mac.default_vid))
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
return 0;
}
@@ -1325,7 +1337,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
if (!err && hw->swapi.status) {
/* force link down for a reasonable delay */
interface->link_down_event = jiffies + (2 * HZ);
- set_bit(__FM10K_LINK_DOWN, &interface->state);
+ set_bit(__FM10K_LINK_DOWN, interface->state);
/* reset dglort_map back to no config */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
@@ -1356,7 +1368,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
/* we need to reset if port count was just updated */
if (dglort_map != hw->mac.dglort_map)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
return 0;
}
@@ -1395,7 +1407,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
/* we need to reset if default VLAN was just updated */
if (pvid != hw->mac.default_vid)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
hw->mac.default_vid = pvid;
@@ -1623,10 +1635,10 @@ void fm10k_up(struct fm10k_intfc *interface)
hw->mac.ops.update_int_moderator(hw);
/* enable statistics capture again */
- clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+ clear_bit(__FM10K_UPDATING_STATS, interface->state);
/* clear down bit to indicate we are ready to go */
- clear_bit(__FM10K_DOWN, &interface->state);
+ clear_bit(__FM10K_DOWN, interface->state);
/* enable polling cleanups */
fm10k_napi_enable_all(interface);
@@ -1660,7 +1672,7 @@ void fm10k_down(struct fm10k_intfc *interface)
int err, i = 0, count = 0;
/* signal that we are down to the interrupt handler and service task */
- if (test_and_set_bit(__FM10K_DOWN, &interface->state))
+ if (test_and_set_bit(__FM10K_DOWN, interface->state))
return;
/* call carrier off first to avoid false dev_watchdog timeouts */
@@ -1680,7 +1692,7 @@ void fm10k_down(struct fm10k_intfc *interface)
fm10k_update_stats(interface);
/* prevent updating statistics while we're down */
- while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
usleep_range(1000, 2000);
/* skip waiting for TX DMA if we lost PCIe link */
@@ -1849,8 +1861,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
memcpy(interface->rssrk, rss_key, sizeof(rss_key));
/* Start off interface as being down */
- set_bit(__FM10K_DOWN, &interface->state);
- set_bit(__FM10K_UPDATING_STATS, &interface->state);
+ set_bit(__FM10K_DOWN, interface->state);
+ set_bit(__FM10K_UPDATING_STATS, interface->state);
return 0;
}
@@ -2027,7 +2039,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* must ensure it is disabled since we haven't yet requested the timer
* or work item.
*/
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
err = fm10k_mbx_request_irq(interface);
if (err)
@@ -2068,7 +2080,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fm10k_iov_configure(pdev, 0);
/* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
return 0;
@@ -2106,7 +2118,7 @@ static void fm10k_remove(struct pci_dev *pdev)
del_timer_sync(&interface->service_timer);
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
cancel_work_sync(&interface->service_task);
/* free netdev, this may bounce the interrupts due to setup_tc */
@@ -2145,7 +2157,7 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
* stopped. We stop the watchdog task until after we resume software
* activity.
*/
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
cancel_work_sync(&interface->service_task);
fm10k_prepare_for_reset(interface);
@@ -2171,10 +2183,10 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
/* force link to stay down for a second to prevent link flutter */
interface->link_down_event = jiffies + (HZ);
- set_bit(__FM10K_LINK_DOWN, &interface->state);
+ set_bit(__FM10K_LINK_DOWN, interface->state);
/* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
fm10k_service_event_schedule(interface);
return err;
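The recurring change across these fm10k hunks is the move from `interface->flags |= FLAG` to set_bit()/test_bit()/test_and_set_bit() on a bitmap. A plain OR is a non-atomic read-modify-write, so two contexts setting different flags can silently drop one update; the atomic bitops close that race, and test_and_set_bit() doubles as a cheap mutual-exclusion primitive (as in the stats-update guard above). A hedged userspace sketch of the semantics, with the kernel helpers emulated via GCC atomics and the flag positions illustrative:

    #include <stdio.h>

    static unsigned long flags[1];   /* like the driver's flags bitmap */

    static void my_set_bit(int nr, unsigned long *addr)
    {
        __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
    }

    static int my_test_and_set_bit(int nr, unsigned long *addr)
    {
        unsigned long old = __atomic_fetch_or(addr, 1UL << nr,
                                              __ATOMIC_RELAXED);
        return (old >> nr) & 1;
    }

    int main(void)
    {
        my_set_bit(0, flags);  /* e.g. a RESET_REQUESTED-style flag */

        /* first caller claims the "updating stats" bit, second backs off */
        printf("first claim:  %d\n", my_test_and_set_bit(1, flags)); /* 0 */
        printf("second claim: %d\n", my_test_and_set_bit(1, flags)); /* 1 */
        return 0;
    }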
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 3b3c63e54ed6..3da482c3d68d 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -28,6 +28,9 @@
# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_I40E) += i40e.o
i40e-objs := i40e_main.o \
@@ -45,4 +48,3 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 82d8040fa418..cdde3cc28fb5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -56,9 +56,6 @@
#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
-#ifdef I40E_FCOE
-#include "i40e_fcoe.h"
-#endif
#include "i40e_client.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
@@ -85,10 +82,6 @@
(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
-#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
-#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
@@ -98,14 +91,6 @@
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
-#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
-
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
@@ -140,7 +125,6 @@ enum i40e_state_t {
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
__I40E_DOWN,
- __I40E_NEEDS_RESTART,
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
@@ -153,15 +137,28 @@ enum i40e_state_t {
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
- __I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_PTP_TX_IN_PROGRESS,
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
__I40E_RESET_FAILED,
- __I40E_PORT_TX_SUSPENDED,
+ __I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_STATE_SIZE__,
+};
+
+/* VSI state flags */
+enum i40e_vsi_state_t {
+ __I40E_VSI_DOWN,
+ __I40E_VSI_NEEDS_RESTART,
+ __I40E_VSI_SYNCING_FILTERS,
+ __I40E_VSI_OVERFLOW_PROMISC,
+ __I40E_VSI_REINIT_REQUESTED,
+ __I40E_VSI_DOWN_REQUESTED,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_VSI_STATE_SIZE__,
};
enum i40e_interrupt_policy {
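Both state enums now end with a __..._SIZE__ sentinel, and the comments above spell out why: DECLARE_BITMAP(state, __I40E_STATE_SIZE__) further down sizes the bitmap from the sentinel, so the array grows automatically as states are added. A self-contained sketch of the pattern, with DECLARE_BITMAP re-derived in userspace and all names hypothetical:

    #include <limits.h>
    #include <stdio.h>

    /* userspace equivalent of the kernel's DECLARE_BITMAP(): enough
     * unsigned longs to hold 'bits' bits */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define DECLARE_BITMAP(name, bits) \
            unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

    enum demo_state {
        DEMO_DOWN,
        DEMO_RESETTING,
        /* must be last: it sizes the bitmap */
        DEMO_STATE_SIZE,
    };

    struct demo_pf {
        DECLARE_BITMAP(state, DEMO_STATE_SIZE);
    };

    int main(void)
    {
        struct demo_pf pf = { { 0 } };

        printf("bitmap words: %zu\n",
               sizeof(pf.state) / sizeof(pf.state[0]));
        return 0;
    }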
@@ -202,17 +199,32 @@ enum i40e_fd_stat_idx {
#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
+/* The following structure contains the data parsed from the user-defined
+ * field of the ethtool_rx_flow_spec structure.
+ */
+struct i40e_rx_flow_userdef {
+ bool flex_filter;
+ u16 flex_word;
+ u16 flex_offset;
+};
+
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
u8 ip4_proto;
/* TX packet view of src and dst */
- __be32 dst_ip[4];
- __be32 src_ip[4];
+ __be32 dst_ip;
+ __be32 src_ip;
__be16 src_port;
__be16 dst_port;
__be32 sctp_v_tag;
+
+ /* Flexible data to match within the packet payload */
+ __be16 flex_word;
+ u16 flex_offset;
+ bool flex_filter;
+
/* filter control */
u16 q_index;
u8 flex_off;
@@ -244,15 +256,85 @@ struct i40e_tc_configuration {
};
struct i40e_udp_port_config {
- __be16 index;
+ /* AdminQ command interface expects port number in Host byte order */
+ u16 port;
u8 type;
};
+/* macros related to FLX_PIT */
+#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
+ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK)
+#define I40E_FLEX_SET_DST_WORD(dst) (((dst) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)
+#define I40E_FLEX_SET_SRC_WORD(src) (((src) << \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK)
+#define I40E_FLEX_PREP_VAL(dst, fsize, src) (I40E_FLEX_SET_DST_WORD(dst) | \
+ I40E_FLEX_SET_FSIZE(fsize) | \
+ I40E_FLEX_SET_SRC_WORD(src))
+
+#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \
+ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+
+#define I40E_MAX_FLEX_SRC_OFFSET 0x1F
+
+/* macros related to GLQF_ORT */
+#define I40E_ORT_SET_IDX(idx) (((idx) << \
+ I40E_GLQF_ORT_PIT_INDX_SHIFT) & \
+ I40E_GLQF_ORT_PIT_INDX_MASK)
+
+#define I40E_ORT_SET_COUNT(count) (((count) << \
+ I40E_GLQF_ORT_FIELD_CNT_SHIFT) & \
+ I40E_GLQF_ORT_FIELD_CNT_MASK)
+
+#define I40E_ORT_SET_PAYLOAD(payload) (((payload) << \
+ I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) & \
+ I40E_GLQF_ORT_FLX_PAYLOAD_MASK)
+
+#define I40E_ORT_PREP_VAL(idx, count, payload) (I40E_ORT_SET_IDX(idx) | \
+ I40E_ORT_SET_COUNT(count) | \
+ I40E_ORT_SET_PAYLOAD(payload))
+
+#define I40E_L3_GLQF_ORT_IDX 34
+#define I40E_L4_GLQF_ORT_IDX 35
+
+/* Flex PIT register index */
+#define I40E_FLEX_PIT_IDX_START_L2 0
+#define I40E_FLEX_PIT_IDX_START_L3 3
+#define I40E_FLEX_PIT_IDX_START_L4 6
+
+#define I40E_FLEX_PIT_TABLE_SIZE 3
+
+#define I40E_FLEX_DEST_UNUSED 63
+
+#define I40E_FLEX_INDEX_ENTRIES 8
+
+/* Flex MASK to disable all flexible entries */
+#define I40E_FLEX_INPUT_MASK (I40E_FLEX_50_MASK | I40E_FLEX_51_MASK | \
+ I40E_FLEX_52_MASK | I40E_FLEX_53_MASK | \
+ I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
+ I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+
+struct i40e_flex_pit {
+ struct list_head list;
+ u16 src_offset;
+ u8 pit_index;
+};
+
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
struct i40e_hw hw;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
bool fc_autoneg_status;
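The FLX_PIT and GLQF_ORT macros above are standard shift-then-mask field packers: each SET macro shifts a value into its field and masks off any overflow, the PREP_VAL macros OR the fields together into one register value, and the GET macros invert the operation. A self-contained sketch with made-up field positions (the real shifts and masks live in the register headers):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical layout: src in bits 0..5, fsize in bits 6..10 */
    #define SRC_SHIFT   0
    #define SRC_MASK    (0x3Fu << SRC_SHIFT)
    #define FSIZE_SHIFT 6
    #define FSIZE_MASK  (0x1Fu << FSIZE_SHIFT)

    #define SET_SRC(s)     (((uint32_t)(s) << SRC_SHIFT) & SRC_MASK)
    #define SET_FSIZE(f)   (((uint32_t)(f) << FSIZE_SHIFT) & FSIZE_MASK)
    #define PREP_VAL(f, s) (SET_FSIZE(f) | SET_SRC(s))
    #define GET_SRC(v)     (((v) & SRC_MASK) >> SRC_SHIFT)

    int main(void)
    {
        uint32_t reg = PREP_VAL(4, 17);

        printf("reg=0x%x src=%u\n", reg, GET_SRC(reg)); /* src comes back as 17 */
        return 0;
    }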
@@ -262,10 +344,6 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num VFs requested for this PF */
u16 num_vf_qps; /* num queue pairs per VF */
-#ifdef I40E_FCOE
- u16 num_fcoe_qps; /* num fcoe queues this PF has set up */
- u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
-#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
@@ -285,7 +363,23 @@ struct i40e_pf {
u32 fd_flush_cnt;
u32 fd_add_err;
u32 fd_atr_cnt;
- u32 fd_tcp_rule;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * respective flow-type.
+ */
+ u16 fd_tcp4_filter_cnt;
+ u16 fd_udp4_filter_cnt;
+ u16 fd_sctp4_filter_cnt;
+ u16 fd_ip4_filter_cnt;
+
+ /* Flexible filter table values that need to be programmed into
+ * hardware, which expects L3 and L4 to be programmed separately. We
+ * need to ensure that the values are in ascending order and don't have
+ * duplicates, so we track the L3 and L4 values in separate lists.
+ */
+ struct list_head l3_flex_pit_list;
+ struct list_head l4_flex_pit_list;
struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_udp_bitmap;
@@ -307,21 +401,15 @@ struct i40e_pf {
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
-#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
-#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
-#endif /* I40E_FCOE */
-#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
-#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24)
#define I40E_FLAG_PTP BIT_ULL(25)
#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27)
@@ -348,16 +436,13 @@ struct i40e_pf {
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
-#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54)
+#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
+#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57)
+#define I40E_FLAG_LEGACY_RX BIT_ULL(58)
- /* tracks features that get auto disabled by errors */
- u64 auto_disable_flags;
-
-#ifdef I40E_FCOE
- struct i40e_fcoe fcoe;
-
-#endif /* I40E_FCOE */
+ struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -412,8 +497,6 @@ struct i40e_pf {
*/
u16 dcbx_cap;
- u32 fcoe_hmc_filt_num;
- u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
struct ptp_clock *ptp_clock;
@@ -517,7 +600,7 @@ struct i40e_vsi {
bool stat_offsets_loaded;
u32 current_netdev_flags;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
@@ -533,16 +616,10 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
-#ifdef I40E_FCOE
- struct i40e_fcoe_stats fcoe_stats;
- struct i40e_fcoe_stats fcoe_stats_offsets;
- bool fcoe_stat_offsets_loaded;
-#endif
u32 tx_restart;
u32 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u64 tx_lost_interrupt;
u32 rx_buf_failed;
u32 rx_page_failed;
@@ -628,9 +705,6 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
-#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */
- unsigned long hung_detected; /* Set/Reset for hung_detection logic */
-
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
@@ -696,27 +770,49 @@ static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
}
/**
- * i40e_rx_is_programming_status - check for programming status descriptor
- * @qw: the first quad word of the program status descriptor
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the PF struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
+/**
+ * i40e_read_fd_input_set - reads value of flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
*
- * The value of in the descriptor length field indicate if this
- * is a programming status descriptor for flow director or FCoE
- * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
- * it is a packet descriptor.
+ * This function reads the value of the flow director input set register
+ * specified by 'addr' (which is specific to the flow-type)
**/
-static inline bool i40e_rx_is_programming_status(u64 qw)
+static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr)
{
- return I40E_RX_PROG_STATUS_DESC_LENGTH ==
- (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+ u64 val;
+
+ val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1));
+ val <<= 32;
+ val += i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
+
+ return val;
}
/**
- * i40e_get_fd_cnt_all - get the total FD filter space available
+ * i40e_write_fd_input_set - writes value into flow director input set register
* @pf: pointer to the PF struct
+ * @addr: register addr
+ * @val: value to be written
+ *
+ * This function writes the specified value to the register specified by
+ * 'addr'. This register is the input set register for the given flow-type.
**/
-static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
+ u16 addr, u64 val)
{
- return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1),
+ (u32)(val >> 32));
+ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0),
+ (u32)(val & 0xFFFFFFFFULL));
}
/* needed by i40e_ethtool.c */
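The new input-set helpers just above split one 64-bit value across a pair of 32-bit register accesses: the high word goes to I40E_PRTQF_FD_INSET(addr, 1) and the low word to I40E_PRTQF_FD_INSET(addr, 0). A minimal sketch of the same split/join, with the register pair modelled as a two-element array:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t inset[2];   /* [0] = low word, [1] = high word */

    static void write_inset(uint64_t val)
    {
        inset[1] = (uint32_t)(val >> 32);
        inset[0] = (uint32_t)(val & 0xFFFFFFFFULL);
    }

    static uint64_t read_inset(void)
    {
        uint64_t val = inset[1];

        val <<= 32;
        val += inset[0];
        return val;
    }

    int main(void)
    {
        write_inset(0x1122334455667788ULL);
        printf("0x%llx\n", (unsigned long long)read_inset());
        return 0;
    }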
@@ -725,7 +821,7 @@ void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
@@ -773,17 +869,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
- struct i40e_vsi_context *ctxt,
- u8 enabled_tc, bool is_add);
-#endif
void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
int i40e_vsi_start_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
@@ -813,8 +906,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
- enum i40e_client_type type);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
@@ -838,20 +930,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
-#ifdef I40E_FCOE
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
- struct rtnl_link_stats64 *storage);
-int i40e_set_mac(struct net_device *netdev, void *p);
-void i40e_set_rx_mode(struct net_device *netdev);
-#endif
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev);
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid);
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid);
-#endif
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
@@ -865,25 +944,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
-void i40e_netpoll(struct net_device *netdev);
-int i40e_fcoe_enable(struct net_device *netdev);
-int i40e_fcoe_disable(struct net_device *netdev);
-int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
-u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
-void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
-void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-void i40e_init_pf_fcoe(struct i40e_pf *pf);
-int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
-void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
-int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb);
-void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id);
-#endif /* I40E_FCOE */
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 56fb27298936..ba04988e0598 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -850,8 +850,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- usleep_range(1000, 2000);
- total_delay++;
+ udelay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index d92aad38afdc..2349fbe04bd2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
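These two hunks belong together: the i40e_asq_send_command() poll now busy-waits in 50 us steps and accumulates total_delay in microseconds, so I40E_ASQ_CMD_TIMEOUT has to change units from 250 msecs to 250000 usecs; without the second hunk the effective timeout would have shrunk by three orders of magnitude. A userspace sketch of the accounting, with the completion check and the delay stubbed out:

    #include <stdio.h>

    #define CMD_TIMEOUT_US 250000       /* usecs; was 250 msecs */

    static int done_after_us = 200;     /* pretend completion time */

    static int cmd_done(int elapsed_us)
    {
        return elapsed_us >= done_after_us;
    }

    int main(void)
    {
        int total_delay = 0;            /* usecs, as in the patched loop */

        do {
            if (cmd_done(total_delay))
                break;
            /* driver does udelay(50) here */
            total_delay += 50;
        } while (total_delay < CMD_TIMEOUT_US);

        printf("completed after %d us\n", total_delay);
        return 0;
    }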
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 451f48b7540a..5eb04114e13f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,10 +185,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Pipeline Personalization Profile */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -563,6 +576,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -645,6 +708,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
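Admin-queue structures such as i40e_aqc_set_wol_filter are wire formats: multi-byte fields are little-endian (__le16/__le32) and the flag words are composed from the #defined masks next to each field. A hedged userspace sketch of filling one such command, with htole16() standing in for the kernel's cpu_to_le16() and the struct trimmed to the fields shown above (illustration, not driver code):

    #include <endian.h>   /* htole16/le16toh on glibc */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SET_WOL_FILTER              0x8000
    #define SET_WOL_FILTER_ACTION_VALID 0x8000

    struct set_wol_filter_cmd {
        uint16_t filter_index;          /* __le16 on the wire */
        uint16_t cmd_flags;
        uint16_t valid_flags;
        uint8_t  reserved[2];
        uint32_t address_high;
        uint32_t address_low;
    };

    int main(void)
    {
        struct set_wol_filter_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.filter_index = htole16(3);
        cmd.cmd_flags    = htole16(SET_WOL_FILTER);
        cmd.valid_flags  = htole16(SET_WOL_FILTER_ACTION_VALID);

        printf("cmd_flags on the wire: 0x%04x\n", le16toh(cmd.cmd_flags));
        return 0;
    }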
@@ -696,6 +761,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1369,6 +1435,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Pipeline Personalization Profile */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ppp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_PPP_GET_CONF 0x1
+#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx */
/* PFC Ignore (direct 0x0301)
@@ -1844,11 +1940,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
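The rename of external_power_ability to power_desc, together with the new I40E_AQ_PWR_CLASS_MASK, indicates that the byte now carries more than the power class, so consumers should mask before comparing. A tiny sketch of the masking:

    #include <stdint.h>
    #include <stdio.h>

    #define AQ_LINK_POWER_CLASS_4 0x03
    #define AQ_PWR_CLASS_MASK     0x03

    int main(void)
    {
        uint8_t power_desc = 0x07;  /* upper bits now carry other info */
        uint8_t class = power_desc & AQ_PWR_CLASS_MASK;

        printf("power class 4? %s\n",
               class == AQ_LINK_POWER_CLASS_4 ? "yes" : "no");
        return 0;
    }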
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d570219efd9f..c3b81a97558e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -32,16 +32,10 @@
#include "i40e_client.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
-
+static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
-static LIST_HEAD(i40e_clients);
-static DEFINE_MUTEX(i40e_client_mutex);
-
-static LIST_HEAD(i40e_client_instances);
-static DEFINE_MUTEX(i40e_client_instance_mutex);
-
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u32 vf_id, u8 *msg, u16 len);
@@ -67,28 +61,6 @@ static struct i40e_ops i40e_lan_ops = {
};
/**
- * i40e_client_type_to_vsi_type - convert client type to vsi type
- * @client_type: the i40e_client type
- *
- * returns the related vsi type value
- **/
-static
-enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
-{
- switch (type) {
- case I40E_CLIENT_IWARP:
- return I40E_VSI_IWARP;
-
- case I40E_CLIENT_VMDQ2:
- return I40E_VSI_VMDQ2;
-
- default:
- pr_err("i40e: Client type unknown\n");
- return I40E_VSI_TYPE_UNKNOWN;
- }
-}
-
-/**
* i40e_client_get_params - Get the params that can change at runtime
* @vsi: the VSI with the message
* @params: client param struct
@@ -134,31 +106,22 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
void
i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance virtual channel receive routine\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == vsi->back) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->virtchnl_receive) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance virtual channel receive routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
- continue;
- }
- cdev->client->ops->virtchnl_receive(&cdev->lan_info,
- cdev->client,
- vf_id, msg, len);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n");
+ return;
+ }
+ cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client,
+ vf_id, msg, len);
}
/**
@@ -169,39 +132,30 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
**/
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
- if (!vsi)
+ if (!cdev || !cdev->client)
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == vsi->back) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance l2_param_change routine\n");
- continue;
- }
+ if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ return;
+ }
memset(&params, 0, sizeof(params));
i40e_client_get_params(vsi, &params);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
- continue;
- }
- cdev->lan_info.params = params;
- cdev->client->ops->l2_param_change(&cdev->lan_info,
- cdev->client,
- &params);
- }
- }
- mutex_unlock(&i40e_client_instance_mutex);
+ memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
+ cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
+ &params);
}
/**
- * i40e_client_release_qvlist
+ * i40e_client_release_qvlist - release MSI-X vector mapping for client
* @ldev: pointer to L2 context.
*
**/
@@ -237,26 +191,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
**/
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close routine\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.netdev == vsi->netdev) {
- if (!cdev->client ||
- !cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance close routine\n");
- continue;
- }
- cdev->client->ops->close(&cdev->lan_info, cdev->client,
- reset);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- i40e_client_release_qvlist(&cdev->lan_info);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
}
/**
@@ -268,30 +215,20 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
**/
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!pf)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_reset) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF reset routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_reset) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF reset routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
- continue;
- }
- cdev->client->ops->vf_reset(&cdev->lan_info,
- cdev->client, vf_id);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);
}
/**
@@ -303,30 +240,21 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
**/
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!pf)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_enable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF enable routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_enable) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF enable routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
- continue;
- }
- cdev->client->ops->vf_enable(&cdev->lan_info,
- cdev->client, num_vfs);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
}
/**
@@ -337,37 +265,25 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
* If there is a client of the specified type attached to this PF, call
* its vf_capable routine
**/
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
- enum i40e_client_type type)
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
int capable = false;
- if (!pf)
- return false;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_capable ||
- !(cdev->client->type == type)) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF capability routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
- continue;
- }
- capable = cdev->client->ops->vf_capable(&cdev->lan_info,
- cdev->client,
- vf_id);
- break;
- }
+ if (!cdev || !cdev->client)
+ goto out;
+ if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
+ dev_info(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
+ goto out;
}
- mutex_unlock(&i40e_client_instance_mutex);
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
+ goto out;
+
+ capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+ cdev->client,
+ vf_id);
+out:
return capable;
}
@@ -377,27 +293,19 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
* @client: pointer to a client struct in the client list.
* @existing: if there was already an existing instance
*
- * Returns cdev ptr on success or if already exists, NULL on failure
**/
-static
-struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client,
- bool *existing)
+static void i40e_client_add_instance(struct i40e_pf *pf)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- *existing = true;
- goto out;
- }
- }
+ if (!registered_client || pf->cinst)
+ return;
+
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
- goto out;
+ return;
cdev->lan_info.pf = (void *)pf;
cdev->lan_info.netdev = vsi->netdev;
@@ -417,7 +325,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
kfree(cdev);
cdev = NULL;
- goto out;
+ return;
}
cdev->lan_info.msix_count = pf->num_iwarp_msix;
@@ -430,41 +338,20 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
else
dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
- cdev->client = client;
- INIT_LIST_HEAD(&cdev->list);
- list_add(&cdev->list, &i40e_client_instances);
-out:
- mutex_unlock(&i40e_client_instance_mutex);
- return cdev;
+ cdev->client = registered_client;
+ pf->cinst = cdev;
}
/**
* i40e_client_del_instance - removes a client instance from the list
* @pf: pointer to the board struct
*
- * Returns 0 on success or non-0 on error
**/
static
-int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+void i40e_client_del_instance(struct i40e_pf *pf)
{
- struct i40e_client_instance *cdev, *tmp;
- int ret = -ENODEV;
-
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
- if ((cdev->lan_info.pf != pf) || (cdev->client != client))
- continue;
-
- dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
- list_del(&cdev->list);
- kfree(cdev);
- ret = 0;
- break;
- }
- mutex_unlock(&i40e_client_instance_mutex);
- return ret;
+ kfree(pf->cinst);
+ pf->cinst = NULL;
}
/**
@@ -473,67 +360,50 @@ int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
+ struct i40e_client *client = registered_client;
struct i40e_client_instance *cdev;
- struct i40e_client *client;
- bool existing = false;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
return;
pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ cdev = pf->cinst;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
- /* Check client state and instantiate client if client registered */
- mutex_lock(&i40e_client_mutex);
- list_for_each_entry(client, &i40e_clients, list) {
- /* first check client is registered */
- if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
- continue;
-
- /* Do we also need the LAN VSI to be up, to create instance */
- if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
- /* check if L2 VSI is up, if not we are not ready */
- if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- continue;
- } else {
- dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
- client->name);
- }
-
- /* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client, &existing);
- if (!cdev)
- continue;
-
- if (!existing) {
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.bus_id, pf->hw.bus.device,
- pf->hw.bus.func);
- }
+ if (!client || !cdev)
+ return;
- mutex_lock(&i40e_client_instance_mutex);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- /* Send an Open request to the client */
- if (client->ops && client->ops->open)
- ret = client->ops->open(&cdev->lan_info,
- client);
- if (!ret) {
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state);
- } else {
- /* remove client instance */
- i40e_client_del_instance(pf, client);
+ /* Here we handle client opens. If the client is down, but
+ * the netdev is up, then open the client.
+ */
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ client->ops && client->ops->open) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ ret = client->ops->open(&cdev->lan_info, client);
+ if (ret) {
+ /* Remove failed client instance */
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ i40e_client_del_instance(pf);
}
}
- mutex_unlock(&i40e_client_instance_mutex);
+ } else {
+ /* Likewise for client close. If the client is up, but the netdev
+ * is down, then close the client.
+ */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ client->ops && client->ops->close) {
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ client->ops->close(&cdev->lan_info, client, false);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
}
- mutex_unlock(&i40e_client_mutex);
}
/**
@@ -566,6 +436,12 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.bus_id,
pf->hw.bus.device, pf->hw.bus.func);
+ /* If a client has already been registered, we need to add an instance
+ * of it to our new LAN device.
+ */
+ if (registered_client)
+ i40e_client_add_instance(pf);
+
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients if
* they can be launched at probe time.
@@ -589,6 +465,9 @@ int i40e_lan_del_device(struct i40e_pf *pf)
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
+ /* First, remove any client instance. */
+ i40e_client_del_instance(pf);
+
mutex_lock(&i40e_device_mutex);
list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->pf == pf) {
@@ -601,7 +480,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
break;
}
}
-
mutex_unlock(&i40e_device_mutex);
return ret;
}
@@ -610,22 +488,24 @@ int i40e_lan_del_device(struct i40e_pf *pf)
* i40e_client_release - release client specific resources
* @client: pointer to the registered client
*
- * Return 0 on success or < 0 on error
**/
-static int i40e_client_release(struct i40e_client *client)
+static void i40e_client_release(struct i40e_client *client)
{
- struct i40e_client_instance *cdev, *tmp;
+ struct i40e_client_instance *cdev;
+ struct i40e_device *ldev;
struct i40e_pf *pf;
- int ret = 0;
- LIST_HEAD(cdevs_tmp);
-
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
- if (strncmp(cdev->client->name, client->name,
- I40E_CLIENT_STR_LENGTH))
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ pf = ldev->pf;
+ cdev = pf->cinst;
+ if (!cdev)
continue;
- pf = (struct i40e_pf *)cdev->lan_info.pf;
+
+ while (test_and_set_bit(__I40E_SERVICE_SCHED,
+ pf->state))
+ usleep_range(500, 1000);
+
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
@@ -637,18 +517,13 @@ static int i40e_client_release(struct i40e_client *client)
"Client %s instance for PF id %d closed\n",
client->name, pf->hw.pf_id);
}
- /* delete the client instance from the list */
- list_move(&cdev->list, &cdevs_tmp);
+ /* delete the client instance */
+ i40e_client_del_instance(pf);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
}
- mutex_unlock(&i40e_client_instance_mutex);
-
- /* free the client device and release its vsi */
- list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
- kfree(cdev);
- }
- return ret;
+ mutex_unlock(&i40e_device_mutex);
}
/**
@@ -664,6 +539,7 @@ static void i40e_client_prepare(struct i40e_client *client)
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
pf = ldev->pf;
+ i40e_client_add_instance(pf);
/* Start the client subtask */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
@@ -785,15 +661,15 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
switch (reset_level) {
case I40E_CLIENT_RESET_LEVEL_PF:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
case I40E_CLIENT_RESET_LEVEL_CORE:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
default:
dev_warn(&pf->pdev->dev,
- "Client %s instance for PF id %d request an unsupported reset: %d.\n",
- client->name, pf->hw.pf_id, reset_level);
+ "Client for PF id %d requested an unsupported reset: %d.\n",
+ pf->hw.pf_id, reset_level);
break;
}
@@ -852,8 +728,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
} else {
update = false;
dev_warn(&pf->pdev->dev,
- "Client %s instance for PF id %d request an unsupported Config: %x.\n",
- client->name, pf->hw.pf_id, flag);
+ "Client for PF id %d request an unsupported Config: %x.\n",
+ pf->hw.pf_id, flag);
}
if (update) {
@@ -878,7 +754,6 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
int i40e_register_client(struct i40e_client *client)
{
int ret = 0;
- enum i40e_vsi_type vsi_type;
if (!client) {
ret = -EIO;
@@ -891,11 +766,9 @@ int i40e_register_client(struct i40e_client *client)
goto out;
}
- mutex_lock(&i40e_client_mutex);
- if (i40e_client_is_registered(client)) {
+ if (registered_client) {
pr_info("i40e: Client %s has already been registered!\n",
client->name);
- mutex_unlock(&i40e_client_mutex);
ret = -EEXIST;
goto out;
}
@@ -908,22 +781,11 @@ int i40e_register_client(struct i40e_client *client)
client->version.major, client->version.minor,
client->version.build,
i40e_client_interface_version_str);
- mutex_unlock(&i40e_client_mutex);
ret = -EIO;
goto out;
}
- vsi_type = i40e_client_type_to_vsi_type(client->type);
- if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
- pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
- client->name, client->type);
- mutex_unlock(&i40e_client_mutex);
- ret = -EIO;
- goto out;
- }
- list_add(&client->list, &i40e_clients);
- set_bit(__I40E_CLIENT_REGISTERED, &client->state);
- mutex_unlock(&i40e_client_mutex);
+ registered_client = client;
i40e_client_prepare(client);
@@ -943,29 +805,21 @@ int i40e_unregister_client(struct i40e_client *client)
{
int ret = 0;
- /* When a unregister request comes through we would have to send
- * a close for each of the client instances that were opened.
- * client_release function is called to handle this.
- */
- mutex_lock(&i40e_client_mutex);
- if (!client || i40e_client_release(client)) {
- ret = -EIO;
- goto out;
- }
-
- /* TODO: check if device is in reset, or if that matters? */
- if (!i40e_client_is_registered(client)) {
+ if (registered_client != client) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
ret = -ENODEV;
goto out;
}
- clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
- list_del(&client->list);
- pr_info("i40e: Unregistered client %s with return code %d\n",
- client->name, ret);
+ registered_client = NULL;
+ /* When an unregister request comes through we would have to send
+ * a close for each of the client instances that were opened.
+ * client_release function is called to handle this.
+ */
+ i40e_client_release(client);
+
+ pr_info("i40e: Unregistered client %s\n", client->name);
out:
- mutex_unlock(&i40e_client_mutex);
return ret;
}
EXPORT_SYMBOL(i40e_unregister_client);
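The client.c rewrite replaces the client and client-instance lists (and their three mutexes) with a single module-wide registered_client pointer plus one pf->cinst instance per PF, which is why every notify helper above collapses to a couple of NULL checks. A userspace sketch of the single-slot registration pattern (types reduced to a name; the "i40iw" label is illustrative):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct client { const char *name; };

    static struct client *registered_client;

    static int register_client(struct client *c)
    {
        if (registered_client)
            return -EEXIST;             /* only one client supported */
        registered_client = c;
        return 0;
    }

    static int unregister_client(struct client *c)
    {
        if (registered_client != c)
            return -ENODEV;
        registered_client = NULL;
        return 0;
    }

    int main(void)
    {
        struct client iwarp = { "i40iw" };

        printf("register: %d\n", register_client(&iwarp));    /* 0 */
        printf("again:    %d\n", register_client(&iwarp));    /* -EEXIST */
        printf("unreg:    %d\n", unregister_client(&iwarp));  /* 0 */
        return 0;
    }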
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 528bd79b05fe..15b21a5315b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -57,11 +57,6 @@ enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_OPENED,
};
-enum i40e_client_type {
- I40E_CLIENT_IWARP,
- I40E_CLIENT_VMDQ2
-};
-
struct i40e_ops;
struct i40e_client;
@@ -214,7 +209,8 @@ struct i40e_client {
u32 flags;
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
- enum i40e_client_type type;
+ u8 type;
+#define I40E_CLIENT_IWARP 0
const struct i40e_client_ops *ops; /* client ops provided by the client */
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ece57d6a6e23..24f020655291 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}
-#ifdef I40E_FCOE
-
-/**
- * i40e_get_san_mac_addr - get SAN MAC address
- * @hw: pointer to the HW structure
- * @mac_addr: pointer to SAN MAC address
- *
- * Reads the adapter's SAN MAC address from NVM
- **/
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
-{
- struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
- u16 flags = 0;
-
- status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
- if (status)
- return status;
-
- if (flags & I40E_AQC_SAN_ADDR_VALID)
- ether_addr_copy(mac_addr, addrs.pf_san_mac);
- else
- status = I40E_ERR_INVALID_MAC_ADDR;
-
- return status;
-}
-#endif
/**
* i40e_read_pba_string - Reads part number string from EEPROM
@@ -4990,7 +4963,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5049,7 +5024,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -5065,3 +5042,215 @@ do_retry:
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
+
+/**
+ * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ppp_resp *resp;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->profile_track_id = cpu_to_le32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_find_segment_in_package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ **/
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
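+	/* nvm[0] holds the NVM table entry count; the section table starts
+	 * immediately after the last NVM table entry.
+	 */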
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
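As a usage sketch (illustrative, not part of this patch; the buffer name and the I40E_DDP_TRACKID value are assumptions), a caller holding a complete package image could locate its profile segment and download it:

	struct i40e_package_header *pkg = (struct i40e_package_header *)pkg_buf;
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	/* Locate the i40e profile segment inside the package image */
	profile = (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!profile)
		return I40E_ERR_CONFIG;

	/* Write every MMIO section of the profile to the device */
	status = i40e_write_profile(hw, profile, I40E_DDP_TRACKID);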
+
+/**
+ * i40e_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ **/
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_PPP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+
+ status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
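Continuing the sketch above (again illustrative; I40E_DDP_TRACKID is an assumed caller-chosen id), a successful download would typically be followed by registering the profile:

	u8 *sec_buf = kzalloc(sizeof(struct i40e_profile_section_header) +
			      sizeof(struct i40e_profile_info), GFP_KERNEL);
	enum i40e_status_code status;

	if (!sec_buf)
		return I40E_ERR_NO_MEMORY;

	/* Record the profile in the device's applied-profile list */
	status = i40e_add_pinfo_to_list(hw, profile, sec_buf, I40E_DDP_TRACKID);
	kfree(sec_buf);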
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 267ad2588255..8f326f87a815 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -158,9 +158,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
- " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
- vsi->state, vsi->flags,
- vsi->netdev_registered, vsi->current_netdev_flags);
+ " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+ vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
+ for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
+ dev_info(&pf->pdev->dev,
+ " state[%d] = %08lx\n",
+ i, vsi->state[i]);
if (vsi == pf->vsi[pf->lan_vsi])
dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
@@ -174,7 +177,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
}
dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
- (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
+ (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
"ON" : "OFF"));
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
@@ -384,6 +387,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
+ if (vsi->type == I40E_VSI_SRIOV)
+ dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
dev_info(&pf->pdev->dev,
" info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
vsi->info.valid_sections, vsi->info.switch_id);
@@ -484,25 +489,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->bw_ets_limit_credits[i],
vsi->bw_ets_max_quanta[i]);
}
-#ifdef I40E_FCOE
- if (vsi->type == I40E_VSI_FCOE) {
- dev_info(&pf->pdev->dev,
- " fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
- vsi->fcoe_stats.rx_fcoe_packets,
- vsi->fcoe_stats.rx_fcoe_dwords,
- vsi->fcoe_stats.rx_fcoe_dropped);
- dev_info(&pf->pdev->dev,
- " fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
- vsi->fcoe_stats.tx_fcoe_packets,
- vsi->fcoe_stats.tx_fcoe_dwords);
- dev_info(&pf->pdev->dev,
- " fcoe_stats: bad_crc = %llu, last_error = %llu\n",
- vsi->fcoe_stats.fcoe_bad_fccrc,
- vsi->fcoe_stats.fcoe_last_error);
- dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n",
- vsi->fcoe_stats.fcoe_ddp_count);
- }
-#endif
}
/**
@@ -713,6 +699,47 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
+/**
+ * i40e_dbg_dump_vf - dump VF info
+ * @pf: the i40e_pf created in command write
+ * @vf_id: the vf_id from the user
+ **/
+static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
+{
+ struct i40e_vf *vf;
+ struct i40e_vsi *vsi;
+
+ if (!pf->num_alloc_vfs) {
+ dev_info(&pf->pdev->dev, "no VFs allocated\n");
+ } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
+ vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
+ vf->num_mdd_events,
+ vf->num_invalid_msgs,
+ vf->num_valid_msgs);
+ } else {
+ dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
+ }
+}
+
+/**
+ * i40e_dbg_dump_vf_all - dump VF info for all VFs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
+{
+ int i;
+
+ if (!pf->num_alloc_vfs)
+ dev_info(&pf->pdev->dev, "no VFs enabled!\n");
+ else
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ i40e_dbg_dump_vf(pf, i);
+}
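For reference, these dumps are invoked through the driver's existing debugfs command file, along the lines of (the PCI address is an example):

	echo "dump vf 0" > /sys/kernel/debug/i40e/0000:01:00.0/command

which is parsed by i40e_dbg_command_write() below.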
+
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -731,6 +758,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_vsi *vsi;
int vsi_seid;
int veb_seid;
+ int vf_id;
int cnt;
/* don't allow partial writes */
@@ -933,6 +961,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_dump_veb_seid(pf, vsi_seid);
else
i40e_dbg_dump_veb_all(pf);
+ } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
+ cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
+ if (cnt > 0)
+ i40e_dbg_dump_vf(pf, vf_id);
+ else
+ i40e_dbg_dump_vf_all(pf);
} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
int ring_id, desc_n;
if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
@@ -1128,6 +1162,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
dev_info(&pf->pdev->dev, "dump reset stats\n");
dev_info(&pf->pdev->dev, "dump port\n");
+ dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
dev_info(&pf->pdev->dev,
"dump debug fwdata <cluster_id> <table_id> <index>\n");
}
@@ -1674,7 +1709,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
} else if (!vsi->netdev) {
dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
vsi_seid);
- } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+ } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
vsi_seid);
} else if (rtnl_trylock()) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a22e26200bcc..7a8eb486b9ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -89,7 +89,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
- I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
@@ -162,19 +161,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
-#ifdef I40E_FCOE
-static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
- I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
- I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
- I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
- I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
- I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
- I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
- I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
- I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
-};
-
-#endif /* I40E_FCOE */
#define I40E_QUEUE_STATS_LEN(n) \
(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
* 2 /* Tx and Rx together */ \
@@ -182,17 +168,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
-#ifdef I40E_FCOE
-#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats)
-#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
- I40E_FCOE_STATS_LEN + \
- I40E_MISC_STATS_LEN + \
- I40E_QUEUE_STATS_LEN((n)))
-#else
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
-#endif /* I40E_FCOE */
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
@@ -228,22 +206,37 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
-static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "MFP",
- "LinkPolling",
- "flow-director-atr",
- "veb-stats",
- "hw-atr-eviction",
+struct i40e_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u64 flag;
+ bool read_only;
};
-#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
+ /* NOTE: MFP setting cannot be changed */
+ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
+ I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
+ I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+ I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
/* Private flags with a global effect, restricted to PF 0 */
-static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "vf-true-promisc-support",
+static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
+ I40E_PRIV_FLAG("vf-true-promisc-support",
+ I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
};
-#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -387,7 +380,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
*
**/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd,
+ struct ethtool_link_ksettings *cmd,
struct net_device *netdev,
struct i40e_pf *pf)
{
@@ -395,90 +388,96 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
u32 link_speed = hw_link_info->link_speed;
u32 e_advertising = 0x0;
u32 e_supported = 0x0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
break;
case I40E_PHY_TYPE_XLAUI:
case I40E_PHY_TYPE_XLPPI:
case I40E_PHY_TYPE_40GBASE_AOC:
- ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ supported = SUPPORTED_40000baseCR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_SR4:
- ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
- ecmd->supported = SUPPORTED_40000baseLR4_Full;
+ supported = SUPPORTED_40000baseLR4_Full;
break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
if (hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_SX ||
hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_LX) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ advertising |= ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_SFI:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_10GBASE_AOC:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = SUPPORTED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
+		advertising = ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
}
break;
case I40E_PHY_TYPE_40GBASE_KR4:
@@ -486,25 +485,25 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
- ecmd->supported |= SUPPORTED_40000baseKR4_Full |
- SUPPORTED_20000baseKR2_Full |
- SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseKX4_Full |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
- ADVERTISED_20000baseKR2_Full |
- ADVERTISED_10000baseKR_Full |
- ADVERTISED_10000baseKX4_Full |
- ADVERTISED_1000baseKX_Full |
- ADVERTISED_Autoneg;
+ supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
case I40E_PHY_TYPE_25GBASE_KR:
case I40E_PHY_TYPE_25GBASE_CR:
case I40E_PHY_TYPE_25GBASE_SR:
case I40E_PHY_TYPE_25GBASE_LR:
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
+ supported = SUPPORTED_Autoneg;
+ advertising = ADVERTISED_Autoneg;
/* TODO: add speeds when ethtool is ready to support*/
break;
default:
@@ -520,38 +519,43 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
i40e_phy_type_to_ethtool(pf, &e_supported,
&e_advertising);
- ecmd->supported = ecmd->supported & e_supported;
- ecmd->advertising = ecmd->advertising & e_advertising;
+ supported = supported & e_supported;
+ advertising = advertising & e_advertising;
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
}
/**
@@ -562,18 +566,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd,
+ struct ethtool_link_ksettings *cmd,
struct i40e_pf *pf)
{
+ u32 supported, advertising;
+
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- i40e_phy_type_to_ethtool(pf, &ecmd->supported,
- &ecmd->advertising);
+ i40e_phy_type_to_ethtool(pf, &supported, &advertising);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
/* With no link speed and duplex are unknown */
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
/**
@@ -583,74 +593,85 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
*
* Reports speed/duplex settings based on media_type
**/
-static int i40e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u32 advertising;
if (link_up)
- i40e_get_settings_link_up(hw, ecmd, netdev, pf);
+ i40e_get_settings_link_up(hw, cmd, netdev, pf);
else
- i40e_get_settings_link_down(hw, ecmd, pf);
+ i40e_get_settings_link_down(hw, cmd, pf);
/* Now set the settings that don't rely on link being up/down */
/* Set autoneg settings */
- ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
switch (hw->phy.media_type) {
case I40E_MEDIA_TYPE_BACKPLANE:
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_Backplane;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_Backplane;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Backplane);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Backplane);
+ cmd->base.port = PORT_NONE;
break;
case I40E_MEDIA_TYPE_BASET:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ cmd->base.port = PORT_TP;
break;
case I40E_MEDIA_TYPE_DA:
case I40E_MEDIA_TYPE_CX4:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ cmd->base.port = PORT_DA;
break;
case I40E_MEDIA_TYPE_FIBER:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ cmd->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
default:
- ecmd->port = PORT_OTHER;
+ cmd->base.port = PORT_OTHER;
break;
}
- /* Set transceiver */
- ecmd->transceiver = XCVR_EXTERNAL;
-
/* Set flow control settings */
- ecmd->supported |= SUPPORTED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
- ecmd->advertising |= ADVERTISED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
break;
case I40E_FC_TX_PAUSE:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
case I40E_FC_RX_PAUSE:
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+
+ advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
break;
}
@@ -664,8 +685,8 @@ static int i40e_get_settings(struct net_device *netdev,
*
* Set speed/duplex per media_types advertised/forced
**/
-static int i40e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
@@ -673,12 +694,15 @@ static int i40e_set_settings(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
- struct ethtool_cmd safe_ecmd;
+ struct ethtool_link_ksettings safe_cmd;
+ struct ethtool_link_ksettings copy_cmd;
i40e_status status = 0;
bool change = false;
+ int timeout = 50;
int err = 0;
- u8 autoneg;
+ u32 autoneg;
u32 advertise;
+ u32 tmp;
/* Changing port settings is not supported if this isn't the
* port's controlling PF
@@ -706,33 +730,47 @@ static int i40e_set_settings(struct net_device *netdev,
return -EOPNOTSUPP;
}
+	/* copy the cmd to copy_cmd to avoid modifying the original */
+ memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
+
/* get our own copy of the bits to check against */
- memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
- i40e_get_settings(netdev, &safe_ecmd);
+ memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
+ i40e_get_link_ksettings(netdev, &safe_cmd);
- /* save autoneg and speed out of ecmd */
- autoneg = ecmd->autoneg;
- advertise = ecmd->advertising;
+ /* save autoneg and speed out of cmd */
+ autoneg = cmd->base.autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(&advertise,
+ cmd->link_modes.advertising);
/* set autoneg and speed back to what they currently are */
- ecmd->autoneg = safe_ecmd.autoneg;
- ecmd->advertising = safe_ecmd.advertising;
+ copy_cmd.base.autoneg = safe_cmd.base.autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tmp, safe_cmd.link_modes.advertising);
+ ethtool_convert_legacy_u32_to_link_mode(
+ copy_cmd.link_modes.advertising, tmp);
+
+ copy_cmd.base.cmd = safe_cmd.base.cmd;
- ecmd->cmd = safe_ecmd.cmd;
- /* If ecmd and safe_ecmd are not the same now, then they are
+ /* If copy_cmd and safe_cmd are not the same now, then they are
* trying to set something that we do not support
*/
- if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+ if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
return -EOPNOTSUPP;
- while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
usleep_range(1000, 2000);
+ }
/* Get the current phy config */
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
NULL);
- if (status)
- return -EAGAIN;
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
/* Copy abilities to config in case autoneg is not
* set below
@@ -745,9 +783,11 @@ static int i40e_set_settings(struct net_device *netdev,
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
/* If autoneg is not supported, return error */
- if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+ if (!ethtool_link_ksettings_test_link_mode(
+ &safe_cmd, supported, Autoneg)) {
netdev_info(netdev, "Autoneg not supported on this phy\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
/* Autoneg is allowed to change */
config.abilities = abilities.abilities |
@@ -760,11 +800,13 @@ static int i40e_set_settings(struct net_device *netdev,
/* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
- if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+ if (ethtool_link_ksettings_test_link_mode(
+ &safe_cmd, supported, Autoneg) &&
hw->phy.link_info.phy_type !=
I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
/* Autoneg is allowed to change */
config.abilities = abilities.abilities &
@@ -773,8 +815,12 @@ static int i40e_set_settings(struct net_device *netdev,
}
}
- if (advertise & ~safe_ecmd.supported)
- return -EINVAL;
+ ethtool_convert_link_mode_to_legacy_u32(&tmp,
+ safe_cmd.link_modes.supported);
+ if (advertise & ~tmp) {
+ err = -EINVAL;
+ goto done;
+ }
if (advertise & ADVERTISED_100baseT_Full)
config.link_speed |= I40E_LINK_SPEED_100MB;
@@ -830,7 +876,8 @@ static int i40e_set_settings(struct net_device *netdev,
netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto done;
}
status = i40e_update_link_info(hw);
@@ -843,6 +890,9 @@ static int i40e_set_settings(struct net_device *netdev,
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
}
+done:
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+
return err;
}
@@ -937,7 +987,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
}
/* If we have link and don't have autoneg */
- if (!test_bit(__I40E_DOWN, &pf->state) &&
+ if (!test_bit(__I40E_DOWN, pf->state) &&
!(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
/* Send message that it might not necessarily work*/
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
@@ -989,10 +1039,10 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
- if (!test_bit(__I40E_DOWN, &pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state)) {
/* Give it a little more time to try to come back */
msleep(75);
- if (!test_bit(__I40E_DOWN, &pf->state))
+ if (!test_bit(__I40E_DOWN, pf->state))
return i40e_nway_reset(netdev);
}
@@ -1089,8 +1139,8 @@ static int i40e_get_eeprom(struct net_device *netdev,
/* make sure it is the right magic for NVMUpdate */
if ((eeprom->magic >> 16) != hw->device_id)
errno = -EINVAL;
- else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
errno = -EBUSY;
else
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
@@ -1165,6 +1215,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
@@ -1191,8 +1246,8 @@ static int i40e_set_eeprom(struct net_device *netdev,
/* check for NVMUpdate access method */
else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
errno = -EINVAL;
- else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
errno = -EBUSY;
else
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
@@ -1252,6 +1307,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
+ int timeout = 50;
int i, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1276,8 +1332,12 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
usleep_range(1000, 2000);
+ }
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
@@ -1425,7 +1485,7 @@ free_tx:
}
done:
- clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
return err;
}
@@ -1483,13 +1543,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-#ifdef I40E_FCOE
- for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
- p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
-#endif
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
@@ -1577,13 +1630,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_misc_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
-#ifdef I40E_FCOE
- for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_fcoe_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
-#endif
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -1648,12 +1694,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(data, i40e_priv_flags_strings,
- I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
- data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN;
- if (pf->hw.pf_id == 0)
- memcpy(data, i40e_gl_priv_flags_strings,
- I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ if (pf->hw.pf_id != 0)
+ break;
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gl_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
break;
default:
break;
@@ -1774,7 +1826,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
- if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+ if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states))
return true;
return false;
}
@@ -1795,7 +1847,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* Offline tests */
netif_info(pf, drv, netdev, "offline testing starting\n");
- set_bit(__I40E_TESTING, &pf->state);
+ set_bit(__I40E_TESTING, pf->state);
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
@@ -1805,7 +1857,7 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, &pf->state);
+ clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -1819,7 +1871,7 @@ static void i40e_diag_test(struct net_device *netdev,
* link then the following link test would have
* to be moved to before the reset
*/
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1834,8 +1886,8 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, &pf->state);
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+ clear_bit(__I40E_TESTING, pf->state);
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
if (if_running)
i40e_open(netdev);
@@ -2285,6 +2337,102 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
}
/**
+ * i40e_check_mask - Check whether a mask field is set
+ * @mask: the full mask value
+ * @field: mask of the field to check
+ *
+ * If the given mask is fully set, return a positive value. If the mask for
+ * the field is fully unset, return zero. Otherwise return a negative error
+ * code.
+ **/
+static int i40e_check_mask(u64 mask, u64 field)
+{
+ u64 value = mask & field;
+
+ if (value == field)
+ return 1;
+ else if (!value)
+ return 0;
+ else
+ return -1;
+}
+
+/**
+ * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
+ * @fsp: pointer to rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Read the user-defined data and deconstruct the value into a structure. No
+ * other code should read the user-defined data, so as to ensure that every
+ * place consistently reads the value correctly.
+ *
+ * The user-defined field is a 64-bit big-endian value, which we
+ * deconstruct by reading bits or bit fields from it. Single bit flags shall
+ * be defined starting from the highest bits, while small bit field values
+ * shall be defined starting from the lowest bits.
+ *
+ * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
+ * and the filter should be rejected. The data structure will always be
+ * zeroed first, even if FLOW_EXT is not set.
+ *
+ **/
+static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *data)
+{
+ u64 value, mask;
+ int valid;
+
+ /* Zero memory first so it's always consistent. */
+ memset(data, 0, sizeof(*data));
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
+ mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
+
+#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0)
+#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16)
+#define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0)
+
+ valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
+ if (valid < 0) {
+ return -EINVAL;
+ } else if (valid) {
+ data->flex_word = value & I40E_USERDEF_FLEX_WORD;
+ data->flex_offset =
+ (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+ data->flex_filter = true;
+ }
+
+ return 0;
+}
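As a sketch of the format this parser consumes (values illustrative, not from the patch), a filter asking to match flex word 0x0011 at payload offset 6 would carry:

	__be64 udef = cpu_to_be64((6ULL << 16) | 0x0011);	/* offset | word */
	__be64 umask = cpu_to_be64(GENMASK_ULL(31, 0));	/* flex field fully set */

	memcpy(fsp->h_ext.data, &udef, sizeof(udef));
	memcpy(fsp->m_ext.data, &umask, sizeof(umask));
	fsp->flow_type |= FLOW_EXT;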
+
+/**
+ * i40e_fill_rx_flow_user_data - Fill in user-defined data field
+ * @fsp: pointer to rx_flow specification
+ *
+ * Reads the userdef data structure and properly fills in the user defined
+ * fields of the rx_flow_spec.
+ **/
+static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *data)
+{
+ u64 value = 0, mask = 0;
+
+ if (data->flex_filter) {
+ value |= data->flex_word;
+ value |= (u64)data->flex_offset << 16;
+ mask |= I40E_USERDEF_FLEX_FILTER;
+ }
+
+ if (value || mask)
+ fsp->flow_type |= FLOW_EXT;
+
+ *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value);
+ *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask);
+}
+
+/**
* i40e_get_ethtool_fdir_all - Populates the rule count of a command
* @pf: Pointer to the physical function struct
* @cmd: The command to get or set Rx flow classification rules
@@ -2335,8 +2483,11 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_rx_flow_userdef userdef = {0};
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
+ u64 input_set;
+ u16 index;
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
@@ -2359,8 +2510,48 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
*/
fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+
+ switch (rule->flow_type) {
+ case SCTP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ break;
+ case TCP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ break;
+ case UDP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ break;
+ case IP_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ break;
+ default:
+ /* If we have stored a filter with a flow type not listed here
+ * it is almost certainly a driver bug. WARN(), and then
+ * assign the input_set as if all fields are enabled to avoid
+ * reading unassigned memory.
+ */
+ WARN(1, "Missing input set index for flow_type %d\n",
+ rule->flow_type);
+ input_set = 0xFFFFFFFFFFFFFFFFULL;
+ goto no_input_set;
+ }
+
+ input_set = i40e_read_fd_input_set(pf, index);
+
+no_input_set:
+	if (input_set & I40E_L3_SRC_MASK)
+		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
+
+	if (input_set & I40E_L3_DST_MASK)
+		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
+
+	if (input_set & I40E_L4_SRC_MASK)
+		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
+
+	if (input_set & I40E_L4_DST_MASK)
+		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -2372,11 +2563,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
if (vsi && vsi->type == I40E_VSI_SRIOV) {
- fsp->h_ext.data[1] = htonl(vsi->vf_id);
- fsp->m_ext.data[1] = htonl(0x1);
+ /* VFs are zero-indexed by the driver, but ethtool
+ * expects them to be one-indexed, so add one here
+ */
+ u64 ring_vf = vsi->vf_id + 1;
+
+ ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fsp->ring_cookie |= ring_vf;
}
}
+ if (rule->flex_filter) {
+ userdef.flex_filter = true;
+ userdef.flex_word = be16_to_cpu(rule->flex_word);
+ userdef.flex_offset = rule->flex_offset;
+ }
+
+ i40e_fill_rx_flow_user_data(fsp, &userdef);
+
return 0;
}
@@ -2574,24 +2778,6 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
/**
- * i40e_match_fdir_input_set - Match a new filter against an existing one
- * @rule: The filter already added
- * @input: The new filter to comapre against
- *
- * Returns true if the two input set match
- **/
-static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
- struct i40e_fdir_filter *input)
-{
- if ((rule->dst_ip[0] != input->dst_ip[0]) ||
- (rule->src_ip[0] != input->src_ip[0]) ||
- (rule->dst_port != input->dst_port) ||
- (rule->src_port != input->src_port))
- return false;
- return true;
-}
-
-/**
* i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
* @vsi: Pointer to the targeted VSI
* @input: The filter to update or NULL to indicate deletion
@@ -2626,22 +2812,22 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
/* if there is an old rule occupying our place remove it */
if (rule && (rule->fd_id == sw_idx)) {
- if (input && !i40e_match_fdir_input_set(rule, input))
- err = i40e_add_del_fdir(vsi, rule, false);
- else if (!input)
- err = i40e_add_del_fdir(vsi, rule, false);
+ /* Remove this rule, since we're either deleting it, or
+ * replacing it.
+ */
+ err = i40e_add_del_fdir(vsi, rule, false);
hlist_del(&rule->fdir_node);
kfree(rule);
pf->fdir_pf_active_filters--;
}
- /* If no input this was a delete, err should be 0 if a rule was
- * successfully found and removed from the list else -EINVAL
+ /* If we weren't given an input, this is a delete, so just return the
+ * error code indicating if there was an entry at the requested slot
*/
if (!input)
return err;
- /* initialize node and set software index */
+ /* Otherwise, install the new rule as requested */
INIT_HLIST_NODE(&input->fdir_node);
/* add filter to the list */
@@ -2658,6 +2844,69 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
}
/**
+ * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
+ * @pf: pointer to PF structure
+ *
+ * This function searches the list of filters and determines which FLX_PIT
+ * entries are still required. It will prune any entries which are no longer
+ * in use after the deletion.
+ **/
+static void i40e_prune_flex_pit_list(struct i40e_pf *pf)
+{
+ struct i40e_flex_pit *entry, *tmp;
+ struct i40e_fdir_filter *rule;
+
+ /* First, we'll check the l3 table */
+ list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) {
+ bool found = false;
+
+ hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+ if (rule->flow_type != IP_USER_FLOW)
+ continue;
+ if (rule->flex_filter &&
+ rule->flex_offset == entry->src_offset) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If we didn't find the filter, then we can prune this entry
+ * from the list.
+ */
+ if (!found) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ /* Followed by the L4 table */
+ list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) {
+ bool found = false;
+
+ hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+ /* Skip this filter if it's L3, since we already
+ * checked those in the above loop
+ */
+ if (rule->flow_type == IP_USER_FLOW)
+ continue;
+ if (rule->flex_filter &&
+ rule->flex_offset == entry->src_offset) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If we didn't find the filter, then we can prune this entry
+ * from the list.
+ */
+ if (!found) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+}
+
+/**
* i40e_del_fdir_entry - Deletes a Flow Director filter entry
* @vsi: Pointer to the targeted VSI
* @cmd: The command to get or set Rx flow classification rules
@@ -2675,20 +2924,700 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
int ret = 0;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return -EBUSY;
ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
+ i40e_prune_flex_pit_list(pf);
+
i40e_fdir_check_and_reenable(pf);
return ret;
}
/**
+ * i40e_unused_pit_index - Find an unused flexible PIT index
+ * @pf: the PF data structure
+ *
+ * Find the first unused flexible PIT index entry. We search both the L3 and
+ * L4 flexible PIT lists so that the returned index is unique and unused by
+ * either currently programmed L3 or L4 filters. We use a bit field as storage
+ * to track which indexes are already used.
+ **/
+static u8 i40e_unused_pit_index(struct i40e_pf *pf)
+{
+ unsigned long available_index = 0xFF;
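+	/* eight flexible PIT indexes exist (dest words 50-57) */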
+ struct i40e_flex_pit *entry;
+
+ /* We need to make sure that the new index isn't in use by either L3
+ * or L4 filters so that IP_USER_FLOW filters can program both L3 and
+ * L4 to use the same index.
+ */
+
+ list_for_each_entry(entry, &pf->l4_flex_pit_list, list)
+ clear_bit(entry->pit_index, &available_index);
+
+ list_for_each_entry(entry, &pf->l3_flex_pit_list, list)
+ clear_bit(entry->pit_index, &available_index);
+
+ return find_first_bit(&available_index, 8);
+}
+
+/**
+ * i40e_find_flex_offset - Find an existing flex src_offset
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to find
+ *
+ * Searches the flex_pit_list for an entry matching src_offset and returns it
+ * if found. Otherwise, returns NULL if there is room to program a new offset,
+ * or ERR_PTR(-ENOSPC) if the table is already full.
+ **/
+static
+struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list,
+ u16 src_offset)
+{
+ struct i40e_flex_pit *entry;
+ int size = 0;
+
+ /* Search for the src_offset first. If we find a matching entry
+ * already programmed, we can simply re-use it.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ size++;
+ if (entry->src_offset == src_offset)
+ return entry;
+ }
+
+ /* If we haven't found an entry yet, then the provided src offset has
+ * not yet been programmed. We will program the src offset later on,
+ * but we need to indicate whether there is enough space to do so
+ * here. We'll make use of ERR_PTR for this purpose.
+ */
+ if (size >= I40E_FLEX_PIT_TABLE_SIZE)
+ return ERR_PTR(-ENOSPC);
+
+ return NULL;
+}
+
+/**
+ * i40e_add_flex_offset - Add src_offset to flex PIT table list
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to add
+ * @pit_index: the PIT index to program
+ *
+ * This function programs the new src_offset to the list. It is expected that
+ * i40e_find_flex_offset has already been tried and returned NULL, indicating
+ * that this offset is not programmed, and that the list has enough space to
+ * store another offset.
+ *
+ * Returns 0 on success, and negative value on error.
+ **/
+static int i40e_add_flex_offset(struct list_head *flex_pit_list,
+ u16 src_offset,
+ u8 pit_index)
+{
+ struct i40e_flex_pit *new_pit, *entry;
+
+	new_pit = kzalloc(sizeof(*new_pit), GFP_KERNEL);
+ if (!new_pit)
+ return -ENOMEM;
+
+ new_pit->src_offset = src_offset;
+ new_pit->pit_index = pit_index;
+
+ /* We need to insert this item such that the list is sorted by
+ * src_offset in ascending order.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ if (new_pit->src_offset < entry->src_offset) {
+ list_add_tail(&new_pit->list, &entry->list);
+ return 0;
+ }
+
+ /* If we found an entry with our offset already programmed we
+ * can simply return here, after freeing the memory. However,
+ * if the pit_index does not match we need to report an error.
+ */
+ if (new_pit->src_offset == entry->src_offset) {
+ int err = 0;
+
+ /* If the PIT index is not the same we can't re-use
+ * the entry, so we must report an error.
+ */
+ if (new_pit->pit_index != entry->pit_index)
+ err = -EINVAL;
+
+ kfree(new_pit);
+ return err;
+ }
+ }
+
+ /* If we reached here, then we haven't yet added the item. This means
+ * that we should add the item at the end of the list.
+ */
+ list_add_tail(&new_pit->list, flex_pit_list);
+ return 0;
+}
+
+/**
+ * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
+ * @pf: Pointer to the PF structure
+ * @flex_pit_list: list of flexible src offsets in use
+ * @flex_pit_start: index of the first entry for this section of the table
+ *
+ * In order to handle flexible data, the hardware uses a table of values
+ * called the FLX_PIT table. This table is used to indicate which sections of
+ * the input correspond to what PIT index values. Unfortunately, hardware is
+ * very restrictive about programming this table. Entries must be ordered by
+ * src_offset in ascending order, without duplicates. Additionally, unused
+ * entries must be set to the unused index value, and must have valid size and
+ * length according to the src_offset ordering.
+ *
+ * This function will reprogram the FLX_PIT register from a book-keeping
+ * structure that we guarantee is already ordered correctly, and has no more
+ * than 3 entries.
+ *
+ * To make things easier, we only support flexible values of one word length,
+ * rather than allowing variable length flexible values.
+ **/
+static void __i40e_reprogram_flex_pit(struct i40e_pf *pf,
+ struct list_head *flex_pit_list,
+ int flex_pit_start)
+{
+ struct i40e_flex_pit *entry = NULL;
+ u16 last_offset = 0;
+ int i = 0, j = 0;
+
+ /* First, loop over the list of flex PIT entries, and reprogram the
+ * registers.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ /* We have to be careful when programming values for the
+ * largest SRC_OFFSET value. It is possible that adding
+ * additional empty values at the end would overflow the space
+ * for the SRC_OFFSET in the FLX_PIT register. To avoid this,
+ * we check here and add the empty values prior to adding the
+ * largest value.
+ *
+ * To determine this, we will use a loop from i+1 to 3, which
+ * will determine whether the unused entries would have valid
+ * SRC_OFFSET. Note that there cannot be extra entries past
+ * this value, because the only valid values would have been
+ * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not
+ * have been added to the list in the first place.
+ */
+ for (j = i + 1; j < 3; j++) {
+ u16 offset = entry->src_offset + j;
+ int index = flex_pit_start + i;
+ u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+ 1,
+ offset - 3);
+
+ if (offset > I40E_MAX_FLEX_SRC_OFFSET) {
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(index),
+ value);
+ i++;
+ }
+ }
+
+ /* Now, we can program the actual value into the table */
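+		/* pit_index selects one of the flexible destination words
+		 * 50-57, hence the +50 below
+		 */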
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+ I40E_FLEX_PREP_VAL(entry->pit_index + 50,
+ 1,
+ entry->src_offset));
+ i++;
+ }
+
+ /* In order to program the last entries in the table, we need to
+ * determine the valid offset. If the list is empty, we'll just start
+ * with 0. Otherwise, we'll start with the last item offset and add 1.
+ * This ensures that all entries have valid sizes. If we don't do this
+ * correctly, the hardware will disable flexible field parsing.
+ */
+ if (!list_empty(flex_pit_list))
+ last_offset = list_prev_entry(entry, list)->src_offset + 1;
+
+ for (; i < 3; i++, last_offset++) {
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+ I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+ 1,
+ last_offset));
+ }
+}
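For intuition (an illustrative walk-through of the code above, not text from the patch): with a single list entry of src_offset 4 and pit_index 2, far below I40E_MAX_FLEX_SRC_OFFSET, the three registers of that section end up written as:

	i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(start + 0),
			  I40E_FLEX_PREP_VAL(52, 1, 4));	/* pit_index 2 + 50 */
	i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(start + 1),
			  I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 1, 5));
	i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(start + 2),
			  I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 1, 6));

where start is the flex_pit_start passed in; the trailing unused entries keep strictly increasing offsets so hardware does not disable flexible field parsing.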
+
+/**
+ * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
+ * @pf: pointer to the PF structure
+ *
+ * This function reprograms both the L3 and L4 FLX_PIT tables. See the
+ * internal helper function for implementation details.
+ **/
+static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
+{
+ __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list,
+ I40E_FLEX_PIT_IDX_START_L3);
+
+ __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list,
+ I40E_FLEX_PIT_IDX_START_L4);
+
+ /* We also need to program the L3 and L4 GLQF ORT register */
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX),
+ I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3,
+ 3, 1));
+
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX),
+ I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4,
+ 3, 1));
+}
+
+/**
+ * i40e_flow_str - Convert a flow_type into a human-readable string
+ * @fsp: the flow specification to describe
+ *
+ * Currently only flow types we support are included here, and the string
+ * value attempts to match what ethtool would use to configure this flow type.
+ **/
+static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ return "tcp4";
+ case UDP_V4_FLOW:
+ return "udp4";
+ case SCTP_V4_FLOW:
+ return "sctp4";
+ case IP_USER_FLOW:
+ return "ip4";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
+ * @pit_index: PIT index to convert
+ *
+ * Returns the mask for a given PIT index. Will return 0 if the pit_index is
+ * out of range.
+ **/
+static u64 i40e_pit_index_to_mask(int pit_index)
+{
+ switch (pit_index) {
+ case 0:
+ return I40E_FLEX_50_MASK;
+ case 1:
+ return I40E_FLEX_51_MASK;
+ case 2:
+ return I40E_FLEX_52_MASK;
+ case 3:
+ return I40E_FLEX_53_MASK;
+ case 4:
+ return I40E_FLEX_54_MASK;
+ case 5:
+ return I40E_FLEX_55_MASK;
+ case 6:
+ return I40E_FLEX_56_MASK;
+ case 7:
+ return I40E_FLEX_57_MASK;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * i40e_print_input_set - Show changes between two input sets
+ * @vsi: the vsi being configured
+ * @old: the old input set
+ * @new: the new input set
+ *
+ * Print the difference between old and new input sets by showing which series
+ * of words are toggled on or off. Only displays the bits we actually support
+ * changing.
+ **/
+static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new)
+{
+ struct i40e_pf *pf = vsi->back;
+ bool old_value, new_value;
+ int i;
+
+ old_value = !!(old & I40E_L3_SRC_MASK);
+ new_value = !!(new & I40E_L3_SRC_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L3_DST_MASK);
+ new_value = !!(new & I40E_L3_DST_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L4_SRC_MASK);
+ new_value = !!(new & I40E_L4_SRC_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L4_DST_MASK);
+ new_value = !!(new & I40E_L4_DST_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_VERIFY_TAG_MASK);
+ new_value = !!(new & I40E_VERIFY_TAG_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ /* Show change of flexible filter entries */
+ for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) {
+ u64 flex_mask = i40e_pit_index_to_mask(i);
+
+ old_value = !!(old & flex_mask);
+ new_value = !!(new & flex_mask);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n",
+ i,
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+ }
+
+ netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n",
+ old);
+ netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n",
+ new);
+}
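+
+/* Example of the resulting log (illustrative; the mask values are whatever
+ * the hardware currently holds):
+ *
+ *   L4 source port: OFF -> ON
+ *    Current input set: <old mask>
+ *   Requested input set: <new mask>
+ */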
+
+/**
+ * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
+ * @vsi: pointer to the targeted VSI
+ * @fsp: pointer to Rx flow specification
+ * @userdef: userdefined data from flow specification
+ *
+ * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support
+ * for partial matches exists with a few limitations. First, hardware only
+ * supports masking by word boundary (2 bytes) and not per individual bit.
+ * Second, hardware is limited to using one mask for a flow type and cannot
+ * use a separate mask for each filter.
+ *
+ * To support these limitations, if we already have a configured filter for
+ * the specified type, this function enforces that new filters of the type
+ * match the configured input set. Otherwise, if we do not have a filter of
+ * the specified type, we allow the input set to be updated to match the
+ * desired filter.
+ *
+ * To help administrators understand why a filter was rejected, we print a
+ * diagnostic message showing how the input set would change, along with a
+ * warning to delete the preexisting filters if required.
+ *
+ * Returns 0 on successful input set match, and a negative return code on
+ * failure.
+ **/
+static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
+ struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *userdef)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct ethtool_tcpip4_spec *tcp_ip4_spec;
+ struct ethtool_usrip4_spec *usr_ip4_spec;
+ u64 current_mask, new_mask;
+ bool new_flex_offset = false;
+ bool flex_l3 = false;
+ u16 *fdir_filter_count;
+ u16 index, src_offset = 0;
+ u8 pit_index = 0;
+ int err;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case SCTP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ fdir_filter_count = &pf->fd_sctp4_filter_cnt;
+ break;
+ case TCP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ fdir_filter_count = &pf->fd_tcp4_filter_cnt;
+ break;
+ case UDP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ fdir_filter_count = &pf->fd_udp4_filter_cnt;
+ break;
+ case IP_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ fdir_filter_count = &pf->fd_ip4_filter_cnt;
+ flex_l3 = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Read the current input set from register memory. */
+ current_mask = i40e_read_fd_input_set(pf, index);
+ new_mask = current_mask;
+
+ /* Determine what changes, if any, are required to the input set in
+ * order to support the provided mask.
+ *
+ * Hardware only supports masking at word (2 byte) granularity and does
+ * not support full bitwise masking. This implementation simplifies
+ * even further and only supports fully enabled or fully disabled
+ * masks for each field, even though we could split the ip4src and
+ * ip4dst fields.
+ */
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case SCTP_V4_FLOW:
+ new_mask &= ~I40E_VERIFY_TAG_MASK;
+ /* Fall through */
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
+
+ /* IPv4 source address */
+ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_SRC_MASK;
+ else if (!tcp_ip4_spec->ip4src)
+ new_mask &= ~I40E_L3_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* IPv4 destination address */
+ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_DST_MASK;
+ else if (!tcp_ip4_spec->ip4dst)
+ new_mask &= ~I40E_L3_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 source port */
+ if (tcp_ip4_spec->psrc == htons(0xFFFF))
+ new_mask |= I40E_L4_SRC_MASK;
+ else if (!tcp_ip4_spec->psrc)
+ new_mask &= ~I40E_L4_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 destination port */
+ if (tcp_ip4_spec->pdst == htons(0xFFFF))
+ new_mask |= I40E_L4_DST_MASK;
+ else if (!tcp_ip4_spec->pdst)
+ new_mask &= ~I40E_L4_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Type of Service is not supported. */
+ if (tcp_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ break;
+ case IP_USER_FLOW:
+ usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
+
+ /* IPv4 source address */
+ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_SRC_MASK;
+ else if (!usr_ip4_spec->ip4src)
+ new_mask &= ~I40E_L3_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* IPv4 destination address */
+ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_DST_MASK;
+ else if (!usr_ip4_spec->ip4dst)
+ new_mask &= ~I40E_L3_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* First 4 bytes of L4 header */
+ if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
+ else if (!usr_ip4_spec->l4_4_bytes)
+ new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Type of Service is not supported. */
+ if (usr_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ /* Filtering on IP version is not supported */
+ if (usr_ip4_spec->ip_ver)
+ return -EINVAL;
+
+ /* Filtering on L4 protocol is not supported */
+ if (usr_ip4_spec->proto)
+ return -EINVAL;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
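+
+ /* Illustrative example: a fully-specified TCP/IPv4 filter (all four
+ * tuple fields masked with all-ones) leaves new_mask with
+ * I40E_L3_SRC_MASK, I40E_L3_DST_MASK, I40E_L4_SRC_MASK and
+ * I40E_L4_DST_MASK all set.
+ */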
+
+ /* First, clear all flexible filter entries */
+ new_mask &= ~I40E_FLEX_INPUT_MASK;
+
+ /* If we have a flexible filter, try to add this offset to the correct
+ * flexible filter PIT list. Once finished, we can update the mask.
+ * If the src_offset changed, we will get a new mask value which will
+ * trigger an input set change.
+ */
+ if (userdef->flex_filter) {
+ struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL;
+
+ /* The flexible offset must be even, since the flexible payload
+ * must be aligned on a 2-byte boundary.
+ */
+ if (userdef->flex_offset & 0x1) {
+ dev_warn(&pf->pdev->dev,
+ "Flexible data offset must be 2-byte aligned\n");
+ return -EINVAL;
+ }
+
+ src_offset = userdef->flex_offset >> 1;
+
+ /* The FLX_PIT source offset field has a limited range */
+ if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) {
+ dev_warn(&pf->pdev->dev,
+ "Flexible data must reside within first 64 bytes of the packet payload\n");
+ return -EINVAL;
+ }
+
+ /* See if this offset has already been programmed. If we get
+ * an ERR_PTR, then the filter is not safe to add. Otherwise,
+ * if we get a NULL pointer, this means we will need to add
+ * the offset.
+ */
+ flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list,
+ src_offset);
+ if (IS_ERR(flex_pit))
+ return PTR_ERR(flex_pit);
+
+ /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown)
+ * packet types, and thus we need to program both L3 and L4
+ * flexible values. These must have identical flexible index,
+ * as otherwise we can't correctly program the input set. So
+ * we'll find both an L3 and L4 index and make sure they are
+ * the same.
+ */
+ if (flex_l3) {
+ l3_flex_pit =
+ i40e_find_flex_offset(&pf->l3_flex_pit_list,
+ src_offset);
+ if (IS_ERR(l3_flex_pit))
+ return PTR_ERR(l3_flex_pit);
+
+ if (flex_pit) {
+ /* If we already had a matching L4 entry, we
+ * need to make sure that the L3 entry we
+ * obtained uses the same index.
+ */
+ if (l3_flex_pit) {
+ if (l3_flex_pit->pit_index !=
+ flex_pit->pit_index) {
+ return -EINVAL;
+ }
+ } else {
+ new_flex_offset = true;
+ }
+ } else {
+ flex_pit = l3_flex_pit;
+ }
+ }
+
+ /* If we didn't find an existing flex offset, we need to
+ * program a new one. However, we don't program it here;
+ * we wait until after checking that it is safe to change
+ * the input set.
+ */
+ if (!flex_pit) {
+ new_flex_offset = true;
+ pit_index = i40e_unused_pit_index(pf);
+ } else {
+ pit_index = flex_pit->pit_index;
+ }
+
+ /* Update the mask with the new offset */
+ new_mask |= i40e_pit_index_to_mask(pit_index);
+ }
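+
+ /* Illustrative example: a user-defined flex match at byte offset 6
+ * gives src_offset = 3 (in 16-bit words); if that offset was not yet
+ * programmed, pit_index comes from i40e_unused_pit_index() and
+ * new_mask gains the corresponding I40E_FLEX_5x_MASK bit.
+ */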
+
+ /* If the mask and flexible filter offsets for this filter match the
+ * currently programmed values we don't need any input set change, so
+ * this filter is safe to install.
+ */
+ if (new_mask == current_mask && !new_flex_offset)
+ return 0;
+
+ netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n",
+ i40e_flow_str(fsp));
+ i40e_print_input_set(vsi, current_mask, new_mask);
+ if (new_flex_offset) {
+ netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d",
+ pit_index, src_offset);
+ }
+
+ /* Hardware input sets are global across multiple ports, so even the
+ * main port cannot change them when in MFP mode as this would impact
+ * any filters on the other ports.
+ */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* This filter requires us to update the input set. However, hardware
+ * only supports one input set per flow type, and does not support
+ * separate masks for each filter. This means that we can only support
+ * a single mask for all filters of a specific type.
+ *
+ * If we have preexisting filters, they obviously depend on the
+ * currently programmed input set. Display a diagnostic message in this
+ * case explaining why the filter could not be accepted.
+ */
+ if (*fdir_filter_count) {
+ netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n",
+ i40e_flow_str(fsp),
+ *fdir_filter_count);
+ return -EOPNOTSUPP;
+ }
+
+ i40e_write_fd_input_set(pf, index, new_mask);
+
+ /* Add the new offset and update table, if necessary */
+ if (new_flex_offset) {
+ err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
+ pit_index);
+ if (err)
+ return err;
+
+ if (flex_l3) {
+ err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
+ src_offset,
+ pit_index);
+ if (err)
+ return err;
+ }
+
+ i40e_reprogram_flex_pit(pf);
+ }
+
+ return 0;
+}
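+
+/* Sketch of an ethtool invocation that exercises this path (hypothetical
+ * interface name and addresses; the user-def field, when present, carries
+ * the flexible word/offset pair parsed by i40e_parse_rx_flow_user_data()):
+ *
+ *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 4
+ */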
+
+/**
* i40e_add_fdir_ethtool - Add/Remove Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
@@ -2699,11 +3628,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct ethtool_rxnfc *cmd)
{
+ struct i40e_rx_flow_userdef userdef;
struct ethtool_rx_flow_spec *fsp;
struct i40e_fdir_filter *input;
+ u16 dest_vsi = 0, q_index = 0;
struct i40e_pf *pf;
int ret = -EINVAL;
- u16 vf_id;
+ u8 dest_ctl;
if (!vsi)
return -EINVAL;
@@ -2712,26 +3643,61 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
- if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
return -ENOSPC;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return -EBUSY;
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ /* Parse the user-defined field */
+ if (i40e_parse_rx_flow_user_data(fsp, &userdef))
+ return -EINVAL;
+
+ /* Extended MAC field is not supported */
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ ret = i40e_check_fdir_input_set(vsi, fsp, &userdef);
+ if (ret)
+ return ret;
+
if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
pf->hw.func_caps.fd_filters_guaranteed)) {
return -EINVAL;
}
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= vsi->num_queue_pairs))
- return -EINVAL;
+ /* ring_cookie is either the drop index, or encodes the queue index
+ * and VF id we wish to target.
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf) {
+ if (ring >= vsi->num_queue_pairs)
+ return -EINVAL;
+ dest_vsi = vsi->id;
+ } else {
+ /* The VF field is one-based (zero targets the main VSI),
+ * so subtract one to get the zero-indexed VF id.
+ */
+ vf--;
+
+ if (vf >= pf->num_alloc_vfs)
+ return -EINVAL;
+ if (ring >= pf->vf[vf].num_queue_pairs)
+ return -EINVAL;
+ dest_vsi = pf->vf[vf].lan_vsi_id;
+ }
+ dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ q_index = ring;
+ }
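+
+ /* Illustrative example: ring_cookie 0x0000000100000002 carries VF
+ * field 1 and ring 2, which after the one-based adjustment targets
+ * queue 2 of VF 0.
+ */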
input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -2739,20 +3705,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -ENOMEM;
input->fd_id = fsp->location;
-
- if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
- input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
- else
- input->dest_ctl =
- I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
-
- input->q_index = fsp->ring_cookie;
- input->flex_off = 0;
- input->pctype = 0;
- input->dest_vsi = vsi->id;
+ input->q_index = q_index;
+ input->dest_vsi = dest_vsi;
+ input->dest_ctl = dest_ctl;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
- input->flow_type = fsp->flow_type;
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->flow_type = fsp->flow_type & ~FLOW_EXT;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
/* Reverse the src and dest notion, since the HW expects them to be from
@@ -2760,33 +3720,29 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
*/
input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
-
- if (ntohl(fsp->m_ext.data[1])) {
- vf_id = ntohl(fsp->h_ext.data[1]);
- if (vf_id >= pf->num_alloc_vfs) {
- netif_info(pf, drv, vsi->netdev,
- "Invalid VF id %d\n", vf_id);
- goto free_input;
- }
- /* Find vsi id from vf id and override dest vsi */
- input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
- if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
- netif_info(pf, drv, vsi->netdev,
- "Invalid queue id %d for VF %d\n",
- input->q_index, vf_id);
- goto free_input;
- }
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ if (userdef.flex_filter) {
+ input->flex_filter = true;
+ input->flex_word = cpu_to_be16(userdef.flex_word);
+ input->flex_offset = userdef.flex_offset;
}
ret = i40e_add_del_fdir(vsi, input, true);
-free_input:
if (ret)
- kfree(input);
- else
- i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ goto free_input;
+
+ /* Add the input filter to the fdir_filter_list, possibly replacing
+ * a previous filter. Do not free the input structure after adding it
+ * to the list as this would cause a use-after-free bug.
+ */
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ return 0;
+
+free_input:
+ kfree(input);
return ret;
}
@@ -3036,7 +3992,7 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
* @dev: network interface device structure
*
* The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the i40e_priv_flags_strings
+ * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags
* array.
*
* Returns a u32 bitmap of flags.
@@ -3046,19 +4002,27 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 ret_flags = 0;
+ u32 i, j, ret_flags = 0;
+
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & pf->flags)
+ ret_flags |= BIT(i);
+ }
+
+ if (pf->hw.pf_id != 0)
+ return ret_flags;
+
+ for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+ const struct i40e_priv_flags *priv_flags;
- ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
- I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
- ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
- I40E_PRIV_FLAGS_FD_ATR : 0;
- ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
- I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
- 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
- if (pf->hw.pf_id == 0) {
- ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
- I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+ priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+ if (priv_flags->flag & pf->flags)
+ ret_flags |= BIT(i + j);
}
return ret_flags;
@@ -3074,54 +4038,66 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u16 sw_flags = 0, valid_flags = 0;
- bool reset_required = false;
- bool promisc_change = false;
- int ret;
+ u64 changed_flags;
+ u32 i, j;
- /* NOTE: MFP is not settable */
+ changed_flags = pf->flags;
- if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
- pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
- else
- pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40e_priv_flags *priv_flags;
- /* allow the user to control the state of the Flow
- * Director ATR (Application Targeted Routing) feature
- * of the driver
+ priv_flags = &i40e_gstrings_priv_flags[i];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i))
+ pf->flags |= priv_flags->flag;
+ else
+ pf->flags &= ~(priv_flags->flag);
+ }
+
+ if (pf->hw.pf_id != 0)
+ goto flags_complete;
+
+ for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i + j))
+ pf->flags |= priv_flags->flag;
+ else
+ pf->flags &= ~(priv_flags->flag);
+ }
+
+flags_complete:
+ /* check for flags that changed */
+ changed_flags ^= pf->flags;
+
+ /* Process any additional changes needed as a result of flag changes.
+ * The changed_flags value reflects the list of bits that were
+ * changed in the code above.
*/
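+ /* Worked example (illustrative bit values): if the old flags were
+ * 0b0101 and the requested flags map to 0b0110, then
+ * changed_flags = 0b0101 ^ 0b0110 = 0b0011, so only the two toggled
+ * features receive the follow-up handling below.
+ */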
- if (flags & I40E_PRIV_FLAGS_FD_ATR) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- } else {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
-
- /* flush current ATR settings */
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- }
-
- if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
- !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
- reset_required = true;
- } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
- reset_required = true;
- }
-
- if (pf->hw.pf_id == 0) {
- if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
- pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
- promisc_change = true;
- } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
- (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
- pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
- promisc_change = true;
- }
+
+ /* Flush current ATR settings if ATR was disabled */
+ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
- if (promisc_change) {
+
+ /* Only allow ATR evict on hardware that is capable of handling it */
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+
+ if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+ u16 sw_flags = 0, valid_flags = 0;
+ int ret;
+
if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
@@ -3137,22 +4113,17 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
}
- if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
- (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
- pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
- else
- pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
-
- /* if needed, issue reset to cause things to take effect */
- if (reset_required)
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+ /* Issue reset to cause things to take effect, as additional bits
+ * are added we will need to create a mask of bits requiring reset
+ */
+ if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
+ ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
}
static const struct ethtool_ops i40e_ethtool_ops = {
- .get_settings = i40e_get_settings,
- .set_settings = i40e_set_settings,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,
@@ -3189,6 +4160,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_priv_flags = i40e_set_priv_flags,
.get_per_queue_coalesce = i40e_get_per_queue_coalesce,
.set_per_queue_coalesce = i40e_set_per_queue_coalesce,
+ .get_link_ksettings = i40e_get_link_ksettings,
+ .set_link_ksettings = i40e_set_link_ksettings,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 82a95cc2c8ee..d5c9c9e06ff5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -32,6 +32,12 @@
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
+/* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -39,9 +45,9 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -50,13 +56,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -286,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -299,11 +308,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev)
-#else
static void i40e_tx_timeout(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -372,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev)
switch (pf->tx_timeout_recovery_level) {
case 1:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
case 2:
- set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
break;
case 3:
- set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
@@ -408,10 +413,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
**/
-#ifndef I40E_FCOE
-static
-#endif
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -420,7 +422,7 @@ void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
- if (test_bit(__I40E_DOWN, &vsi->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
if (!vsi->tx_rings)
@@ -723,55 +725,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
veb->stat_offsets_loaded = true;
}
-#ifdef I40E_FCOE
-/**
- * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
- * @vsi: the VSI that is capable of doing FCoE
- **/
-static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_fcoe_stats *ofs;
- struct i40e_fcoe_stats *fs; /* device's eth stats */
- int idx;
-
- if (vsi->type != I40E_VSI_FCOE)
- return;
-
- idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
- fs = &vsi->fcoe_stats;
- ofs = &vsi->fcoe_stats_offsets;
-
- i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
- i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
- i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
- i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
- i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
- i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
- i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_last_error, &fs->fcoe_last_error);
- i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
-
- vsi->fcoe_stat_offsets_loaded = true;
-}
-
-#endif
/**
* i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated
@@ -790,7 +743,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
- u64 tx_lost_interrupt;
struct i40e_ring *p;
u32 rx_page, rx_buf;
u64 bytes, packets;
@@ -801,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u64 tx_p, tx_b;
u16 q;
- if (test_bit(__I40E_DOWN, &vsi->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
ns = i40e_get_vsi_stats_struct(vsi);
@@ -816,7 +768,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
- tx_lost_interrupt = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -835,7 +786,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -854,7 +804,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
- vsi->tx_lost_interrupt = tx_lost_interrupt;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1101,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
nsd->fd_atr_status = true;
else
nsd->fd_atr_status = false;
@@ -1129,9 +1078,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
-#ifdef I40E_FCOE
- i40e_update_fcoe_stats(vsi);
-#endif
}
/**
@@ -1400,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* to failed, so we don't bother to try sending the filter
* to the hardware.
*/
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
@@ -1562,11 +1508,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
*
* Returns 0 on success, negative on failure
**/
-#ifdef I40E_FCOE
-int i40e_set_mac(struct net_device *netdev, void *p)
-#else
static int i40e_set_mac(struct net_device *netdev, void *p)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1583,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return -EADDRNOTAVAIL;
if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1626,17 +1568,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*
* Setup VSI queue mapping for enabled traffic classes.
**/
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
- struct i40e_vsi_context *ctxt,
- u8 enabled_tc,
- bool is_add)
-#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc,
bool is_add)
-#endif
{
struct i40e_pf *pf = vsi->back;
u16 sections = 0;
@@ -1686,11 +1621,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- qcount = num_tc_qps;
- break;
-#endif
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -1800,11 +1730,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
-#ifdef I40E_FCOE
-void i40e_set_rx_mode(struct net_device *netdev)
-#else
static void i40e_set_rx_mode(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1883,19 +1809,12 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
- while (next) {
- next = hlist_entry(next->hlist.next,
- typeof(struct i40e_new_mac_filter),
- hlist);
-
- /* keep going if we found a broadcast filter */
- if (next && is_broadcast_ether_addr(next->f->macaddr))
- continue;
-
- break;
+ hlist_for_each_entry_continue(next, hlist) {
+ if (!is_broadcast_ether_addr(next->f->macaddr))
+ return next;
}
- return next;
+ return NULL;
}
/**
@@ -2001,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (fcnt != num_add) {
*promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err),
@@ -2084,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct i40e_aqc_add_macvlan_element_data *add_list;
struct i40e_aqc_remove_macvlan_element_data *del_list;
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
usleep_range(1000, 2000);
pf = vsi->back;
@@ -2220,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_add = 0;
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state)) {
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+ vsi->state)) {
new->state = I40E_FILTER_FAILED;
continue;
}
@@ -2308,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* safely exit if we didn't just enter, we no longer have any failed
* filters, and we have reduced filters below the threshold value.
*/
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
!promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
vsi_name);
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
promisc_changed = true;
vsi->promisc_threshold = 0;
}
/* if the VF is not trusted do not do promisc */
if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
goto out;
}
@@ -2346,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if ((changed_flags & IFF_PROMISC) ||
(promisc_changed &&
- test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
+ test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
- test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state));
+ test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+ vsi->state));
if ((vsi->type == I40E_VSI_MAIN) &&
(pf->lan_veb != I40E_NO_VEB) &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
@@ -2434,7 +2353,7 @@ out:
if (retval)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return retval;
err_no_memory:
@@ -2446,7 +2365,7 @@ err_no_memory_locked:
spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return -ENOMEM;
}
@@ -2487,13 +2406,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
- i40e_notify_client_of_l2_param_changes(vsi);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
return 0;
}
@@ -2707,13 +2628,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
*
* net_device_ops implementation for adding vlan ids
**/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -2744,13 +2660,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
*
* net_device_ops implementation for removing vlan ids
**/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -2920,9 +2831,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
- i40e_fcoe_setup_ddp_resources(vsi);
-#endif
return err;
}
@@ -2942,9 +2850,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
- i40e_fcoe_free_ddp_resources(vsi);
-#endif
}
/**
@@ -3015,9 +2920,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.qlen = ring->count;
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED));
-#ifdef I40E_FCOE
- tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
@@ -3098,7 +3000,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->rx_buf_len = vsi->rx_buf_len;
- rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
@@ -3120,9 +3023,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */
rx_ctx.showiv = 0;
-#ifdef I40E_FCOE
- rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
/* set the prefena field to 1 because the manual says to */
rx_ctx.prefena = 1;
@@ -3144,6 +3044,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
return -ENOMEM;
}
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ clear_ring_build_skb_enabled(ring);
+ else
+ set_ring_build_skb_enabled(ring);
+
/* cache tail for quicker writes, and clear the reg before use */
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -3181,27 +3087,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
- vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
- + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = I40E_RXBUFFER_2048;
-
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-#ifdef I40E_FCOE
- /* setup rx buffer for FCoE */
- if ((vsi->type == I40E_VSI_FCOE) &&
- (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_buf_len = I40E_RXBUFFER_3072;
- vsi->max_frame = I40E_RXBUFFER_3072;
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+ I40E_RXBUFFER_2048;
}
-#endif /* I40E_FCOE */
- /* round up for the chip's needs */
- vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
- BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_configure_rx_ring(vsi->rx_rings[i]);
@@ -3281,6 +3181,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
+ /* Reset FDir counters as we're replaying all existing filters */
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
i40e_add_del_fdir(vsi, filter, true);
@@ -3705,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data)
* this is not a performance path and napi_schedule()
* can deal with rescheduling.
*/
- if (!test_bit(__I40E_DOWN, &pf->state))
+ if (!test_bit(__I40E_VSI_DOWN, pf->state))
napi_schedule_irqoff(&q_vector->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
}
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
- set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
@@ -3738,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++;
} else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
- set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
}
}
@@ -3771,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
@@ -3781,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_DOWN, &pf->state)) {
+ if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf, false);
}
@@ -3993,11 +3899,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
* This is used by netconsole to send skbs without having to re-enable
* interrupts. It's not called while the normal interrupt routine is executing.
**/
-#ifdef I40E_FCOE
-void i40e_netpoll(struct net_device *netdev)
-#else
static void i40e_netpoll(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -4005,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &vsi->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -4017,6 +3919,8 @@ static void i40e_netpoll(struct net_device *netdev)
}
#endif
+#define I40E_QTX_ENA_WAIT_COUNT 50
+
/**
* i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
* @pf: the PF being configured
@@ -4047,6 +3951,50 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_tx_q - Start or stop a particular Tx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 tx_reg;
+ int i;
+
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ usleep_range(10, 20);
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+}
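+
+/* Expected caller pattern (sketch, mirroring i40e_vsi_control_tx below):
+ *
+ *	i40e_control_tx_q(pf, pf_q, enable);
+ *	ret = i40e_pf_txq_wait(pf, pf_q, enable);
+ */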
+
+/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4054,41 +4002,11 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int i, j, pf_q, ret = 0;
- u32 tx_reg;
+ int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-
- /* warn the TX unit of coming changes */
- i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
- if (!enable)
- usleep_range(10, 20);
-
- for (j = 0; j < 50; j++) {
- tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
- ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
- break;
- usleep_range(1000, 2000);
- }
- /* Skip if the queue is already in the requested state */
- if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
-
- /* turn on/off the queue */
- if (enable) {
- wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- } else {
- tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- }
-
- wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
- /* No waiting for the Tx queue to disable */
- if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
- continue;
+ i40e_control_tx_q(pf, pf_q, enable);
/* wait for the change to finish */
ret = i40e_pf_txq_wait(pf, pf_q, enable);
@@ -4100,8 +4018,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
- if (hw->revision_id == 0)
- mdelay(50);
return ret;
}
@@ -4135,6 +4051,43 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_rx_q - Start or stop a particular Rx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 rx_reg;
+ int i;
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ else
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+}
+
+/**
* i40e_vsi_control_rx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4142,33 +4095,11 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int i, j, pf_q, ret = 0;
- u32 rx_reg;
+ int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- for (j = 0; j < 50; j++) {
- rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
- ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
- break;
- usleep_range(1000, 2000);
- }
-
- /* Skip if the queue is already in the requested state */
- if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
-
- /* turn on/off the queue */
- if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- else
- rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
- /* No waiting for the Tx queue to disable */
- if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
- continue;
+ i40e_control_rx_q(pf, pf_q, enable);
/* wait for the change to finish */
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4180,6 +4111,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
}
}
+ /* Due to HW errata, on Rx disable only, the register can indicate done
+ * before the queue is actually disabled. Wait 50ms to be sure.
+ */
+ if (!enable)
+ mdelay(50);
+
return ret;
}
@@ -4206,6 +4143,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ /* When port TX is suspended, don't wait */
+ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
+ return i40e_vsi_stop_rings_no_wait(vsi);
+
/* do rx first for enable and last for disable
* Ignore return value, we need to shut down whatever we can
*/
@@ -4214,6 +4155,29 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
+ * @vsi: the VSI being shutdown
+ *
+ * This function stops all the rings for a VSI but does not delay to verify
+ * that the rings have been disabled. It is expected that the caller is
+ * shutting down multiple VSIs at once and will apply a single delay for all
+ * of them after initiating the shutdown. This is particularly useful for
+ * shutting down lots of VFs together. Otherwise, a large delay can be
+ * incurred while each VSI is configured serially.
+ **/
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ i40e_control_tx_q(pf, pf_q, false);
+ i40e_control_rx_q(pf, pf_q, false);
+ }
+}
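+
+/* Sketch of the intended batch shutdown (hypothetical caller; the actual
+ * VSI lookup varies):
+ *
+ *	for each VF's LAN VSI
+ *		i40e_vsi_stop_rings_no_wait(vsi);
+ *	mdelay(50);    // one shared settle delay instead of one per VSI
+ */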
+
+/**
* i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
**/
@@ -4471,17 +4435,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
- bool reset = false;
-
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ struct i40e_pf *pf = vsi->back;
+ if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- reset = true;
- i40e_notify_client_of_netdev_close(vsi, reset);
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ pf->flags |= I40E_FLAG_CLIENT_RESET;
}
/**
@@ -4490,18 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
**/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
- if (test_bit(__I40E_DOWN, &vsi->state))
- return;
-
- /* No need to disable FCoE VSI when Tx suspended */
- if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
- vsi->type == I40E_VSI_FCOE) {
- dev_dbg(&vsi->back->pdev->dev,
- "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
- }
- set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
else
@@ -4514,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
- if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+ if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
return;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
@@ -4552,21 +4506,20 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
}
}
-#ifdef CONFIG_I40E_DCB
/**
* i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
* @vsi: the VSI being configured
*
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
**/
-static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ /* Check and wait for the Tx queue */
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4574,11 +4527,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
vsi->seid, pf_q);
return ret;
}
- }
-
- pf_q = vsi->base_queue;
- for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ /* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4591,6 +4540,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
return 0;
}
+#ifdef CONFIG_I40E_DCB
/**
* i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
* @pf: the PF
@@ -4603,8 +4553,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
int v, ret = 0;
for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
- /* No need to wait for FCoE VSI queues */
- if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+ if (pf->vsi[v]) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
break;
@@ -4622,16 +4571,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
* @vsi: Pointer to VSI struct
*
* This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors. If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
**/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
struct i40e_ring *tx_ring = NULL;
struct i40e_pf *pf;
- u32 head, val, tx_pending_hw;
+ u32 val, tx_pending;
int i;
pf = vsi->back;
@@ -4657,47 +4605,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
- head = i40e_get_head(tx_ring);
-
- tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
-
- /* HW is done executing descriptors, updated HEAD write back,
- * but SW hasn't processed those descriptors. If interrupt is
- * not generated from this point ON, it could result into
- * dev_watchdog detecting timeout on those netdev_queue,
- * hence proactively trigger SW interrupt.
- */
- if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- /* NAPI Poll didn't run and clear since it was set */
- if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected)) {
- netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
- vsi->seid, q_idx, tx_pending_hw,
- tx_ring->next_to_clean, head,
- tx_ring->next_to_use,
- readl(tx_ring->tail));
- netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
- vsi->seid, q_idx, val);
- i40e_force_wb(vsi, tx_ring->q_vector);
- } else {
- /* First Chance - detected possible hung */
- set_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected);
- }
- }
+ tx_pending = i40e_get_tx_pending(tx_ring);
- /* This is the case where we have interrupts missing,
- * so the tx_pending in HW will most likely be 0, but we
- * will have tx_pending in SW since the WB happened but the
- * interrupt got lost.
+ /* Interrupts are disabled and Tx descriptors are pending, so
+ * trigger the SW interrupt (don't wait). Worst case there will
+ * be one extra interrupt which finds nothing to clean because
+ * the queues were already cleaned.
*/
- if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
- (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- local_bh_disable();
- if (napi_reschedule(&tx_ring->q_vector->napi))
- tx_ring->tx_stats.tx_lost_interrupt++;
- local_bh_enable();
- }
+ if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+ i40e_force_wb(vsi, tx_ring->q_vector);
}
/**
@@ -4721,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
return;
/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return;
/* Make sure type is MAIN VSI */
@@ -5228,20 +5144,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
continue;
/* - Enable all TCs for the LAN VSI
-#ifdef I40E_FCOE
- * - For FCoE VSI only enable the TC configured
- * as per the APP TLV
-#endif
* - For all others keep them at TC0 for now
*/
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
-#ifdef I40E_FCOE
- if (pf->vsi[v]->type == I40E_VSI_FCOE)
- tc_map = i40e_get_fcoe_tc_map(pf);
-#endif /* #ifdef I40E_FCOE */
ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
if (ret) {
@@ -5277,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
@@ -5308,10 +5216,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
(hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
dev_info(&pf->pdev->dev,
"DCBX offload is not supported or is disabled for this PF.\n");
-
- if (pf->flags & I40E_FLAG_MFP_ENABLED)
- goto out;
-
} else {
/* When status is not DISABLED then DCBX in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -5449,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if (err)
return err;
- clear_bit(__I40E_DOWN, &vsi->state);
+ clear_bit(__I40E_VSI_DOWN, vsi->state);
i40e_napi_enable_all(vsi);
i40e_vsi_enable_irq(vsi);
@@ -5472,13 +5376,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* replay FDIR SB filters */
if (vsi->type == I40E_VSI_FDIR) {
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = 0;
- if (pf->fd_tcp_rule > 0) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
- pf->fd_tcp_rule = 0;
- }
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
i40e_fdir_filter_restore(vsi);
}
@@ -5503,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
WARN_ON(in_interrupt());
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
i40e_up(vsi);
- clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
/**
@@ -5535,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi)
int i;
/* It is assumed that the caller of this function
- * sets the vsi->state __I40E_DOWN bit.
+ * sets the vsi->state __I40E_VSI_DOWN bit.
*/
if (vsi->netdev) {
netif_carrier_off(vsi->netdev);
@@ -5550,8 +5449,6 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
- i40e_notify_client_of_netdev_close(vsi, false);
-
}
/**
@@ -5612,17 +5509,15 @@ exit:
return ret;
}
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
-#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
-#endif
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return i40e_setup_tc(netdev, tc->tc);
+
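+ /* Indicate to the stack that the TC-to-queue mapping is offloaded
+ * to hardware
+ */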
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return i40e_setup_tc(netdev, tc->mqprio->num_tc);
}
/**
@@ -5645,8 +5540,8 @@ int i40e_open(struct net_device *netdev)
int err;
/* disallow open during test or if eeprom is broken */
- if (test_bit(__I40E_TESTING, &pf->state) ||
- test_bit(__I40E_BAD_EEPROM, &pf->state))
+ if (test_bit(__I40E_TESTING, pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, pf->state))
return -EBUSY;
netif_carrier_off(netdev);
@@ -5675,6 +5570,8 @@ int i40e_open(struct net_device *netdev)
* Finish initialization of the VSI.
*
* Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
@@ -5738,7 +5635,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
return err;
}
@@ -5753,6 +5650,7 @@ err_setup_tx:
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
struct i40e_fdir_filter *filter;
+ struct i40e_flex_pit *pit_entry, *tmp;
struct hlist_node *node2;
hlist_for_each_entry_safe(filter, node2,
@@ -5760,7 +5658,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
hlist_del(&filter->fdir_node);
kfree(filter);
}
+
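+ /* Also release any flexible payload (PIT) entries that were
+ * programmed for the Flow Director filters
+ */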
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
pf->fdir_pf_active_filters = 0;
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
+ /* Reprogram the default input set for TCP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for UDP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for SCTP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@@ -5787,12 +5721,14 @@ int i40e_close(struct net_device *netdev)
* i40e_do_reset - Start a PF or Core Reset sequence
* @pf: board private structure
* @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* The essential difference in resets is that the PF Reset
* doesn't clear the packet buffers, doesn't reset the PE
* firmware, and doesn't bother the other PFs on the chip.
**/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
@@ -5838,7 +5774,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the Core Reset.
*/
dev_dbg(&pf->pdev->dev, "PFR requested\n");
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, lock_acquired);
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -5850,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
- test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+ test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ vsi->state))
i40e_vsi_reinit_locked(pf->vsi[v]);
- clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
- }
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
@@ -5864,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
- test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
- set_bit(__I40E_DOWN, &vsi->state);
+ test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ vsi->state)) {
+ set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
- clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
} else {
@@ -6007,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
@@ -6016,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_resume_port_tx(pf);
- clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
/* In case of error no point in resuming VSIs */
if (ret)
goto exit;
@@ -6025,12 +5960,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_pf_wait_queues_disabled(pf);
if (ret) {
/* Schedule PF reset to recover */
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- /* Notify the client for the DCB changes */
- i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
}
exit:
@@ -6047,7 +5982,7 @@ exit:
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
rtnl_lock();
- i40e_do_reset(pf, reset_flags);
+ i40e_do_reset(pf, reset_flags, true);
rtnl_unlock();
}
@@ -6140,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
u32 fcnt_prog, fcnt_avail;
struct hlist_node *node;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return;
- /* Check if, FD SB or ATR was auto disabled and if there is enough room
- * to re-enable
- */
+ /* Check if we have enough room to re-enable FDir SB capability. */
fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR. We also
- * must check that no existing ntuple rules for TCP are in effect
+ /* We should wait for even more space before re-enabling ATR.
+ * Additionally, we cannot enable ATR as long as we still have TCP SB
+ * rules active.
*/
- if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->fd_tcp_rule == 0)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
+ (pf->fd_tcp4_filter_cnt == 0)) {
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -6218,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6237,9 +6171,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
- if (!disable_atr)
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
@@ -6269,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, pf->state))
return;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf);
@@ -6286,14 +6220,11 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
**/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
- if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
+ if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
return;
switch (vsi->type) {
case I40E_VSI_MAIN:
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
-#endif
if (!vsi->netdev || !vsi->netdev_registered)
break;
@@ -6382,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link &&
new_link_speed == old_link_speed &&
- (test_bit(__I40E_DOWN, &vsi->state) ||
+ (test_bit(__I40E_VSI_DOWN, vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
return;
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -6413,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
/* make sure we don't do these things too often */
@@ -6452,44 +6383,44 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
- rtnl_lock();
- if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_REINIT_REQUESTED);
- clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ clear_bit(__I40E_REINIT_REQUESTED, pf->state);
}
- if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
- clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
- clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
- clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
- reset_flags |= BIT(__I40E_DOWN_REQUESTED);
- clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+ if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
+ clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
}
/* If there's a recovery already waiting, it takes
* precedence before starting a new reset sequence.
*/
- if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
- i40e_handle_reset_warning(pf);
- goto unlock;
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
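+ /* The service task does not hold rtnl here, so pass
+ * lock_acquired == false and let the reset helpers take it
+ * themselves where needed.
+ */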
+ i40e_prep_for_reset(pf, false);
+ i40e_reset(pf);
+ i40e_rebuild(pf, false, false);
}
/* If we're already down or resetting, just bail */
if (reset_flags &&
- !test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_CONFIG_BUSY, &pf->state))
- i40e_do_reset(pf, reset_flags);
-
-unlock:
- rtnl_unlock();
+ !test_bit(__I40E_VSI_DOWN, pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags, true);
+ rtnl_unlock();
+ }
}
/**
@@ -6534,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u32 val;
/* Do not run clean AQ when PF reset fails */
- if (test_bit(__I40E_RESET_FAILED, &pf->state))
+ if (test_bit(__I40E_RESET_FAILED, pf->state))
return;
/* check for error indications */
@@ -6635,9 +6566,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
opcode);
break;
}
- } while (pending && (i++ < pf->adminq_work_limit));
+ } while (i++ < pf->adminq_work_limit);
+
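+ /* Only clear the pending flag if the queue was drained before
+ * hitting the work limit; otherwise leave it set so the service
+ * task is rescheduled.
+ */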
+ if (i < pf->adminq_work_limit)
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
- clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
/* re-enable Admin queue interrupt cause */
val = rd32(hw, I40E_PFINT_ICR0_ENA);
val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -6662,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
err);
- set_bit(__I40E_BAD_EEPROM, &pf->state);
+ set_bit(__I40E_BAD_EEPROM, pf->state);
}
}
- if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
- clear_bit(__I40E_BAD_EEPROM, &pf->state);
+ clear_bit(__I40E_BAD_EEPROM, pf->state);
}
}
@@ -6975,17 +6908,19 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* Close up the VFs and other things in prep for PF Reset.
**/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret = 0;
u32 v;
- clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
- if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return;
if (i40e_check_asq_alive(&pf->hw))
i40e_vc_notify_reset(pf);
@@ -6993,7 +6928,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* quiesce the VSIs and their queues that are not already DOWN */
+ /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
+ if (!lock_acquired)
+ rtnl_lock();
i40e_pf_quiesce_all_vsi(pf);
+ if (!lock_acquired)
+ rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
@@ -7028,31 +6968,41 @@ static void i40e_send_version(struct i40e_pf *pf)
}
/**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_reset - wait for a core reset to finish, or do a PF reset if no CORER was seen
* @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
**/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
- u32 val;
- u32 v;
- /* Now we wait for GRST to settle out.
- * We don't have to delete the VEBs or VSIs from the hw switch
- * because the reset will make them disappear.
- */
ret = i40e_pf_reset(hw);
if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
- set_bit(__I40E_RESET_FAILED, &pf->state);
- goto clear_recovery;
+ set_bit(__I40E_RESET_FAILED, pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ } else {
+ pf->pfr_count++;
}
- pf->pfr_count++;
+ return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
+ i40e_status ret;
+ u32 val;
+ int v;
- if (test_bit(__I40E_DOWN, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -7066,7 +7016,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* re-verify the eeprom if we just had an EMP reset */
- if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
i40e_clear_pxe_mode(hw);
@@ -7075,8 +7025,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset;
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ hw->func_caps.num_rx_qp, 0, 0);
if (ret) {
dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
goto end_core_reset;
@@ -7095,14 +7044,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
-#ifdef I40E_FCOE
- i40e_init_pf_fcoe(pf);
-
-#endif
/* do basic switch setup */
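+ /* i40e_setup_pf_switch touches netdev state, so rtnl must be
+ * held unless the caller already acquired it
+ */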
+ if (!lock_acquired)
+ rtnl_lock();
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
- goto end_core_reset;
+ goto end_unlock;
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -7173,7 +7120,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of Main VSI failed: %d\n", ret);
- goto end_core_reset;
+ goto end_unlock;
}
}
@@ -7216,18 +7163,45 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
- if (pf->num_alloc_vfs) {
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_reset_vf(&pf->vf[v], true);
- }
+ /* Release the RTNL lock before we start resetting VFs */
+ if (!lock_acquired)
+ rtnl_unlock();
+
+ i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
i40e_send_version(pf);
+ /* We've already released the lock, so don't do it again */
+ goto end_core_reset;
+
+end_unlock:
+ if (!lock_acquired)
+ rtnl_unlock();
end_core_reset:
- clear_bit(__I40E_RESET_FAILED, &pf->state);
+ clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
- clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired)
+{
+ int ret;
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_reset(pf);
+ if (!ret)
+ i40e_rebuild(pf, reinit, lock_acquired);
}
/**
@@ -7236,11 +7210,13 @@ clear_recovery:
*
* Close up the VFs and other things in prep for a Core Reset,
* then get ready to rebuild the world.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
**/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
- i40e_prep_for_reset(pf);
- i40e_reset_and_rebuild(pf, false);
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, false, lock_acquired);
}
/**
@@ -7258,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
return;
/* find what triggered the MDD event */
@@ -7310,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
/* Queue belongs to the PF, initiate a reset */
if (pf_mdd_detected) {
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
}
@@ -7339,12 +7315,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
"Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
- set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
}
/* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -7352,6 +7328,23 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
/**
+ * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
+ * @pf: board private structure
+ **/
+static void i40e_sync_udp_filters(struct i40e_pf *pf)
+{
+ int i;
+
+ /* loop through and set pending bit for all active UDP filters */
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->udp_ports[i].port)
+ pf->pending_udp_bitmap |= BIT_ULL(i);
+ }
+
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+}
+
+/**
* i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
* @pf: board private structure
**/
@@ -7359,7 +7352,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- __be16 port;
+ u16 port;
int i;
if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7370,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i);
- port = pf->udp_ports[i].index;
+ port = pf->udp_ports[i].port;
if (port)
ret = i40e_aq_add_udp_tunnel(hw, port,
pf->udp_ports[i].type,
@@ -7383,11 +7376,11 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
"%s %s port %d, index %d failed, err %s aq_err %s\n",
pf->udp_ports[i].type ? "vxlan" : "geneve",
port ? "add" : "delete",
- ntohs(port), i,
+ port, i,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->udp_ports[i].index = 0;
+ pf->udp_ports[i].port = 0;
}
}
}
@@ -7405,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return;
- }
- if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
i40e_detect_recover_hung(pf);
@@ -7419,23 +7411,34 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ pf->flags &= ~I40E_FLAG_CLIENT_RESET;
+ } else {
+ i40e_client_subtask(pf);
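+ /* Push any deferred L2 parameter change notifications
+ * to the client
+ */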
+ if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
+ }
+ }
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now
* rather than wait for the timer to tick again.
*/
if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
- test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
- test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
- test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
i40e_service_event_schedule(pf);
}
@@ -7492,15 +7495,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
I40E_REQ_DESCRIPTOR_MULTIPLE);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- vsi->alloc_queue_pairs = pf->num_fcoe_qps;
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
- vsi->num_q_vectors = pf->num_fcoe_msix;
- break;
-
-#endif /* I40E_FCOE */
default:
WARN_ON(1);
return -ENODATA;
@@ -7593,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
}
vsi->type = type;
vsi->back = pf;
- set_bit(__I40E_DOWN, &vsi->state);
+ set_bit(__I40E_VSI_DOWN, vsi->state);
vsi->flags = 0;
vsi->idx = vsi_idx;
vsi->int_rate_limit = 0;
@@ -7817,6 +7811,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
static int i40e_init_msix(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ int cpus, extra_vectors;
int vectors_left;
int v_budget, i;
int v_actual;
@@ -7835,9 +7830,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* - The CPU count within the NUMA node if iWARP is enabled
-#ifdef I40E_FCOE
- * - The number of FCOE qps.
-#endif
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
@@ -7852,10 +7844,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
}
- /* reserve vectors for the main PF traffic queues */
- pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ /* reserve some vectors for the main PF traffic queues. Initially we
+ * only reserve at most 50% of the available vectors, in the case that
+ * the number of online CPUs is large. This ensures that we can enable
+ * extra features as well. Once we've enabled the other features, we
+ * will use any remaining vectors to reach as close as we can to the
+ * number of online CPUs.
+ */
+ cpus = num_online_cpus();
+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
vectors_left -= pf->num_lan_msix;
- v_budget += pf->num_lan_msix;
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7868,20 +7866,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
-#ifdef I40E_FCOE
- /* can we reserve enough for FCoE? */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- if (!vectors_left)
- pf->num_fcoe_msix = 0;
- else if (vectors_left >= pf->num_fcoe_qps)
- pf->num_fcoe_msix = pf->num_fcoe_qps;
- else
- pf->num_fcoe_msix = 1;
- v_budget += pf->num_fcoe_msix;
- vectors_left -= pf->num_fcoe_msix;
- }
-
-#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
iwarp_requested = pf->num_iwarp_msix;
@@ -7918,6 +7902,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ /* On systems with a large number of SMP cores, we previously limited
+ * the number of vectors for num_lan_msix to be at most 50% of the
+ * available vectors, to allow for other features. Now, we add back
+ * the remaining vectors. However, we ensure that the total
+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
+ * calculate the number of vectors we can add without going over the
+ * cap of CPUs. For systems with a small number of CPUs this will be
+ * zero.
+ */
+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+ pf->num_lan_msix += extra_vectors;
+ vectors_left -= extra_vectors;
+
+ WARN(vectors_left < 0,
+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+ v_budget += pf->num_lan_msix;
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
if (!pf->msix_entries)
@@ -7958,10 +7959,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
- pf->num_fcoe_msix = 0;
-#endif
/* partition out the remaining vectors */
switch (vec) {
@@ -7975,13 +7972,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else {
pf->num_lan_msix = 2;
}
-#ifdef I40E_FCOE
- /* give one vector to FCoE */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_lan_msix = 1;
- pf->num_fcoe_msix = 1;
- }
-#endif
break;
default:
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
@@ -8001,13 +7991,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
pf->num_lan_qps = pf->num_lan_msix;
-#ifdef I40E_FCOE
- /* give one vector to FCoE */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_fcoe_msix = 1;
- vec--;
- }
-#endif
break;
}
}
@@ -8028,13 +8011,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
}
-#ifdef I40E_FCOE
-
- if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
- dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
- }
-#endif
i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
pf->num_lan_msix,
@@ -8133,9 +8109,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED |
@@ -8196,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* Only request the irq if this is the first time through, and
* not when we're rebuilding after a Reset
*/
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
err = request_irq(pf->msix_entries[0].vector,
i40e_intr, 0, pf->int_name, pf);
if (err) {
@@ -8368,13 +8341,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (vsi->type == I40E_VSI_MAIN) {
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
- seed_dw[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
} else if (vsi->type == I40E_VSI_SRIOV) {
for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HKEY1(i, vf_id),
- seed_dw[i]);
+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
}
@@ -8392,9 +8362,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
return -EINVAL;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HLUT1(i, vf_id),
- lut_dw[i]);
+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
}
@@ -8522,9 +8490,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
/* Determine the RSS size of the VSI */
- if (!vsi->rss_size)
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
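+ /* Limit RSS to the queue count of a single TC */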
+ if (!vsi->rss_size) {
+ u16 qcount;
+
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+ }
if (!vsi->rss_size)
return -EINVAL;
@@ -8558,6 +8529,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
*
* returns 0 if rss is not enabled, if enabled returns the final rss queue
* count which may be different from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
@@ -8570,12 +8542,14 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
+ u16 qcount;
+
vsi->req_queue_pairs = queue_count;
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
pf->alloc_rss_size = new_rss_size;
- i40e_reset_and_rebuild(pf, true);
+ i40e_reset_and_rebuild(pf, true, true);
/* Discard the user configured hash keys and lut, if less
* queues are enabled.
@@ -8587,8 +8561,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/* Reset vsi->rss_size, as number of enabled queues changed */
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
i40e_pf_config_rss(pf);
}
@@ -8821,10 +8795,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
-#ifdef I40E_FCOE
- i40e_init_pf_fcoe(pf);
-
-#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -8851,9 +8821,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else {
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
pf->eeprom_version = 0xDEAD;
@@ -8913,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_SB_AUTO_DISABLED);
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
- pf->fdir_pf_active_filters = 0;
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
}
@@ -8955,6 +8925,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -8978,7 +8949,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
return 0;
}
@@ -8990,12 +8961,12 @@ static int i40e_set_features(struct net_device *netdev,
*
* Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
**/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].index == port)
+ if (pf->udp_ports[i].port == port)
return i;
}
@@ -9013,7 +8984,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
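+ /* convert once here; udp_ports[] now tracks ports in host order */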
+ u16 port = ntohs(ti->port);
u8 next_idx;
u8 idx;
@@ -9021,8 +8992,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n",
- ntohs(port));
+ netdev_info(netdev, "port %d already offloaded\n", port);
return;
}
@@ -9031,7 +9001,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- ntohs(port));
+ port);
return;
}
@@ -9049,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
}
/* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
+ pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
@@ -9065,7 +9035,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 idx;
idx = i40e_get_udp_port_idx(pf, port);
@@ -9090,14 +9060,14 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
/* if port exists, set it to 0 (mark for deletion)
* and make it pending
*/
- pf->udp_ports[idx].index = 0;
+ pf->udp_ports[idx].port = 0;
pf->pending_udp_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
return;
not_found:
netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- ntohs(port));
+ port);
}
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9174,6 +9144,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* is to change the mode then that requires a PF reset to
* allow rebuild of the components with required hardware
* bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -9229,7 +9201,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+ true);
break;
}
}
@@ -9352,10 +9325,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
-#ifdef I40E_FCOE
- .ndo_fcoe_enable = i40e_fcoe_enable,
- .ndo_fcoe_disable = i40e_fcoe_disable,
-#endif
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
@@ -9388,6 +9357,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
u8 broadcast[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
int etherdev_size;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
etherdev_size = sizeof(struct i40e_netdev_priv);
netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -9398,52 +9369,57 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA |
- NETIF_F_SOFT_FEATURES |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_SCTP_CRC |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
- 0;
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= hw_enc_features;
+
/* record features VLANs can make use of */
- netdev->vlan_features |= netdev->hw_enc_features |
- NETIF_F_TSO_MANGLEID;
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
netdev->hw_features |= NETIF_F_NTUPLE;
+ hw_features = hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
- netdev->hw_features |= netdev->hw_enc_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= hw_features;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- /* The following steps are necessary to prevent reception
- * of tagged packets - some older NVM configurations load a
- * default a MAC-VLAN filter that accepts any tagged packet
- * which must be replaced by a normal filter.
+ /* The following steps are necessary for two reasons. First,
+ * some older NVM configurations load a default MAC-VLAN
+ * filter that will accept any tagged packet, and we want to
+ * replace this with a normal filter. Additionally, it is
+ * possible our MAC address was provided by the platform using
+ * Open Firmware or similar.
+ *
+ * Thus, we need to remove the default filter and install one
+ * specific to the MAC address.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -9489,9 +9465,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->netdev_ops = &i40e_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
i40e_set_ethtool_ops(netdev);
-#ifdef I40E_FCOE
- i40e_fcoe_config_netdev(netdev, vsi);
-#endif
/* MTU range: 68 - 9706 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -9715,16 +9688,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- ret = i40e_fcoe_vsi_init(vsi, &ctxt);
- if (ret) {
- dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
- return ret;
- }
- break;
-
-#endif /* I40E_FCOE */
case I40E_VSI_IWARP:
/* send down message to iWARP */
break;
@@ -9751,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
vsi->active_filters = 0;
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
@@ -9804,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
return -ENODEV;
}
if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, &pf->state)) {
+ !test_bit(__I40E_VSI_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
@@ -10141,7 +10104,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
}
case I40E_VSI_VMDQ2:
- case I40E_VSI_FCOE:
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
@@ -10789,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
+ /* repopulate tunnel port filters */
+ i40e_sync_udp_filters(pf);
+
return ret;
}
@@ -10801,9 +10766,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
int queues_left;
pf->num_lan_qps = 0;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
-#endif
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -10820,9 +10782,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
@@ -10839,9 +10798,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
@@ -10862,22 +10818,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
-#ifdef I40E_FCOE
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- if (I40E_DEFAULT_FCOE <= queues_left) {
- pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
- } else if (I40E_MINIMUM_FCOE <= queues_left) {
- pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
- } else {
- pf->num_fcoe_qps = 0;
- pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
- dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
- }
-
- queues_left -= pf->num_fcoe_qps;
- }
-
-#endif
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
@@ -10909,9 +10849,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left);
-#ifdef I40E_FCOE
- dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
-#endif
}
/**
@@ -10978,10 +10915,6 @@ static void i40e_print_features(struct i40e_pf *pf)
i += snprintf(&buf[i], REMAIN(i), " Geneve");
if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP");
-#ifdef I40E_FCOE
- if (pf->flags & I40E_FLAG_FCOE_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " FCOE");
-#endif
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " VEB");
else
@@ -10994,20 +10927,18 @@ static void i40e_print_features(struct i40e_pf *pf)
/**
* i40e_get_platform_mac_addr - get platform-specific MAC address
- *
* @pdev: PCI device information struct
* @pf: board private structure
*
- * Look up the MAC address in Open Firmware on systems that support it,
- * and use IDPROM on SPARC if no OF address is found. On return, the
- * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
- * has been selected.
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which checks Open Firmware or an
+ * arch-specific fallback. Failing that, default to the value stored
+ * in firmware.
**/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
- pf->flags &= ~I40E_FLAG_PF_MAC;
- if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
- pf->flags |= I40E_FLAG_PF_MAC;
+ if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+ i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
@@ -11072,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pf->next_vsi = 0;
pf->pdev = pdev;
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
hw = &pf->hw;
hw->back = pf;
@@ -11098,6 +11029,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
*/
@@ -11196,8 +11130,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ hw->func_caps.num_rx_qp, 0, 0);
if (err) {
dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
goto err_init_lan_hmc;
@@ -11219,9 +11152,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_stop_lldp(hw, true, NULL);
}
- i40e_get_mac_addr(hw, hw->mac.addr);
/* allow a platform config to override the HW addr */
i40e_get_platform_mac_addr(pdev, pf);
+
if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
@@ -11232,18 +11165,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
pf->flags |= I40E_FLAG_PORT_ID_VALID;
-#ifdef I40E_FCOE
- err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
- if (err)
- dev_info(&pdev->dev,
- "(non-fatal) SAN MAC retrieval failed: %d\n", err);
- if (!is_valid_ether_addr(hw->mac.san_addr)) {
- dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
- hw->mac.san_addr);
- ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
- }
- dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
-#endif /* I40E_FCOE */
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
@@ -11261,8 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->service_timer_period = HZ;
INIT_WORK(&pf->service_task, i40e_service_task);
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
- pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
@@ -11300,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev))
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
}
@@ -11373,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
*/
- clear_bit(__I40E_DOWN, &pf->state);
+ clear_bit(__I40E_VSI_DOWN, pf->state);
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
@@ -11393,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
@@ -11434,16 +11354,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- err = i40e_lan_add_device(pf);
- if (err)
- dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
- err);
-
-#ifdef I40E_FCOE
- /* create FCoE interface */
- i40e_fcoe_vsi_setup(pf);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+ }
-#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
/* Devices on the IOSF bus do not have this information
@@ -11531,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Unwind what we've done if something failed in the setup */
err_vsis:
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
err_switch_setup:
@@ -11582,13 +11499,18 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
/* no more scheduling of any task */
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
if (pf->service_timer.data)
del_timer_sync(&pf->service_timer);
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -11615,10 +11537,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
/* remove attached clients */
- ret_code = i40e_lan_del_device(pf);
- if (ret_code) {
- dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
- ret_code);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
}
/* shutdown and destroy the HMC */
@@ -11685,9 +11608,9 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
}
/* shutdown all operations */
- if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ if (!test_bit(__I40E_SUSPENDED, pf->state)) {
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
}
@@ -11752,11 +11675,11 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (test_bit(__I40E_SUSPENDED, &pf->state))
+ if (test_bit(__I40E_SUSPENDED, pf->state))
return;
rtnl_lock();
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, true);
rtnl_unlock();
}
@@ -11816,10 +11739,10 @@ static void i40e_shutdown(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11829,11 +11752,16 @@ static void i40e_shutdown(struct pci_dev *pdev)
cancel_work_sync(&pf->service_task);
i40e_fdir_teardown(pf);
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM,
@@ -11860,14 +11788,14 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
struct i40e_hw *hw = &pf->hw;
int retval = 0;
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11912,10 +11840,10 @@ static int i40e_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
/* handling the reset will rebuild the device state */
- if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
- clear_bit(__I40E_DOWN, &pf->state);
+ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
+ clear_bit(__I40E_VSI_DOWN, pf->state);
rtnl_lock();
- i40e_reset_and_rebuild(pf, false);
+ i40e_reset_and_rebuild(pf, false, true);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 38ee18f11124..800bd55d0159 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index fea81ed065db..80e66da6b145 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,4 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
-#ifdef CONFIG_I40E_FCOE
-#define I40E_FCOE
-#endif
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2551fc827444..c56d976cf85a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
-#ifdef I40E_FCOE
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-#endif
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -380,4 +377,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 2caee35528fa..18c1cc08da97 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
- clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
}
/**
@@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
- clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
}
if (pf->ptp_clock) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
new file mode 100644
index 000000000000..d3e55f54a05e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -0,0 +1,229 @@
+/*******************************************************************************
+ *
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40e will be "i40e".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40e
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/**
+ * The i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace points should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40e ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
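
As a usage sketch (event names taken from the definitions below), a wrapped call resolves through I40E_TRACE_NAME() to the PF tracepoint; a VF build of shared code would resolve to the trace_i40evf_* name instead:

	/* i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb) expands to: */
	trace_i40e_clean_rx_irq(rx_ring, rx_desc, skb);

	/* and the enabled-check wrapper expands to
	 * trace_i40e_clean_rx_irq_enabled():
	 */
	if (i40e_trace_enabled(clean_rx_irq))
		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
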
+
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+ i40e_tx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf),
+
+ /* The convention here is to make the first fields in the
+ * TP_STRUCT match the TP_PROTO exactly. This allows the args
+ * struct generated by the tplist tool (from the bcc-tools
+ * package) to be used for those fields. Accessing fields other
+ * than the tracepoint args will require the tplist output to be
+ * adjusted.
+ */
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, buf)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p buf %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+ i40e_tx_template, i40e_clean_tx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+ i40e_tx_template, i40e_clean_tx_irq_unmap,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+ i40e_rx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, skb)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p skb %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+ i40e_rx_template, i40e_clean_rx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+ i40e_rx_template, i40e_clean_rx_irq_rx,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+ i40e_xmit_template,
+
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring),
+
+ TP_STRUCT__entry(
+ __field(void*, skb)
+ __field(void*, ring)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->ring = ring;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s skb: %p ring: %p",
+ __get_str(devname), __entry->skb,
+ __entry->ring)
+);
+
+DEFINE_EVENT(
+ i40e_xmit_template, i40e_xmit_frame_ring,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+ i40e_xmit_template, i40e_xmit_frame_ring_drop,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+/* Events unique to the PF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
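
One wiring note: tracepoint bodies are only emitted where CREATE_TRACE_POINTS is defined, so exactly one compilation unit in the module must define it before including this header. A minimal sketch of that consumer (the actual site is presumably i40e_main.c, which this excerpt does not show):

	/* in exactly one .c file of the i40e module */
	#define CREATE_TRACE_POINTS
	#include "i40e_trace.h"

	/* every other file includes the header without the define */
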
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 97d46058d71d..29321a6167a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
+#include "i40e_trace.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
@@ -71,6 +72,9 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
(fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
/* Use LAN VSI Id if not programmed by user */
flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
@@ -203,7 +207,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct udphdr *udp;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -219,18 +222,28 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+ sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip[0];
+ ip->daddr = fd_data->dst_ip;
udp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip[0];
+ ip->saddr = fd_data->src_ip;
udp->source = fd_data->src_port;
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
@@ -241,10 +254,13 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
- if (err)
- kfree(raw_packet);
- return err ? -EOPNOTSUPP : 0;
+ if (add)
+ pf->fd_udp4_filter_cnt++;
+ else
+ pf->fd_udp4_filter_cnt--;
+
+ return 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
@@ -263,7 +279,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct tcphdr *tcp;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
/* Dummy packet */
@@ -281,39 +296,110 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+ sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip[0];
+ ip->daddr = fd_data->dst_ip;
tcp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip[0];
+ ip->saddr = fd_data->src_ip;
tcp->source = fd_data->src_port;
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+ fd_data->pctype, fd_data->fd_id, ret);
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+ if (add)
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+ fd_data->pctype, fd_data->fd_id);
+ else
+ dev_info(&pf->pdev->dev,
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ }
+
if (add) {
- pf->fd_tcp_rule++;
+ pf->fd_tcp4_filter_cnt++;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
} else {
- pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
- (pf->fd_tcp_rule - 1) : 0;
- if (pf->fd_tcp_rule == 0) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ pf->fd_tcp4_filter_cnt--;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ return 0;
+}
+
+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for a given flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct sctphdr *sctp;
+ struct iphdr *ip;
+ u8 *raw_packet;
+ int ret;
+ /* Dummy packet */
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+ memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+ ip->daddr = fd_data->dst_ip;
+ sctp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip;
+ sctp->source = fd_data->src_port;
+
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
else
dev_info(&pf->pdev->dev,
@@ -321,10 +407,12 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
- if (err)
- kfree(raw_packet);
+ if (add)
+ pf->fd_sctp4_filter_cnt++;
+ else
+ pf->fd_sctp4_filter_cnt--;
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
@@ -343,7 +431,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
{
struct i40e_pf *pf = vsi->back;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
int i;
@@ -359,18 +446,29 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- ip->saddr = fd_data->src_ip[0];
- ip->daddr = fd_data->dst_ip[0];
+ ip->saddr = fd_data->src_ip;
+ ip->daddr = fd_data->dst_ip;
ip->protocol = 0;
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
fd_data->pctype = i;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* The packet buffer wasn't added to the ring so we
+ * need to free it now.
+ */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
@@ -383,10 +481,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
}
}
- if (err)
- kfree(raw_packet);
+ if (add)
+ pf->fd_ip4_filter_cnt++;
+ else
+ pf->fd_ip4_filter_cnt--;
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
/**
@@ -409,6 +509,9 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case UDP_V4_FLOW:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ break;
case IP_USER_FLOW:
switch (input->ip4_proto) {
case IPPROTO_TCP:
@@ -417,19 +520,23 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case IPPROTO_UDP:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ break;
case IPPROTO_IP:
ret = i40e_add_del_fdir_ipv4(vsi, input, add);
break;
default:
/* We cannot support masking based on protocol */
- goto unsupported_flow;
+ dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
+ input->ip4_proto);
+ return -EINVAL;
}
break;
default:
-unsupported_flow:
- dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
+ dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
input->flow_type);
- ret = -EINVAL;
+ return -EINVAL;
}
 /* The buffer allocated here will normally be freed by
@@ -476,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
* progress do nothing, once flush is complete the state will
* be cleared.
*/
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return;
pf->fd_add_err++;
@@ -484,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
/* filter programming failed most likely due to table full */
@@ -498,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- !(pf->auto_disable_flags &
- I40E_FLAG_FD_SB_ENABLED)) {
+ !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
- pf->auto_disable_flags |=
- I40E_FLAG_FD_SB_ENABLED;
}
}
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -599,19 +704,15 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
- * @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
u32 head, tail;
- if (!in_sw)
- head = i40e_get_head(ring);
- else
- head = ring->next_to_clean;
+ head = i40e_get_head(ring);
tail = readl(ring->tail);
if (head != tail)
@@ -657,6 +758,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
break;
@@ -683,6 +785,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ i40e_trace(clean_tx_irq_unmap,
+ tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
@@ -734,11 +838,11 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- unsigned int j = i40e_get_tx_pending(tx_ring, false);
+ unsigned int j = i40e_get_tx_pending(tx_ring);
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &vsi->state) &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -756,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &vsi->state)) {
+ !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -930,9 +1034,29 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
}
/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: qword representing status_error_len in CPU ordering
+ *
+ * The value in the descriptor length field indicates whether this
+ * is a programming status descriptor for flow director or FCoE:
+ * such descriptors carry I40E_RX_PROG_STATUS_DESC_LENGTH in that
+ * field, otherwise it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+ /* The Rx filter programming status and SPH bit occupy the same
+ * spot in the descriptor. Since we don't support packet split we
+ * can just reuse the bit as an indication that this is a
+ * programming status descriptor.
+ */
+ return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
+}
+
+/**
* i40e_clean_programming_status - clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
* @rx_desc: the rx descriptor written back by HW
+ * @qw: qword representing status_error_len in CPU ordering
*
 * Flow director should handle FD_FILTER_STATUS to check whether its filter
 * programming succeeded and take actions accordingly. FCoE should
@@ -940,22 +1064,23 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
*
**/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+ union i40e_rx_desc *rx_desc,
+ u64 qw)
{
- u64 qw;
+ u32 ntc = rx_ring->next_to_clean + 1;
u8 id;
- qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, rx_desc, id);
-#ifdef I40E_FCOE
- else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
- (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
- i40e_fcoe_handle_status(rx_ring, rx_desc, id);
-#endif
}
/**
@@ -1010,7 +1135,6 @@ err:
**/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -1030,8 +1154,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_bi->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL;
rx_bi->page_offset = 0;
@@ -1132,6 +1270,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer at which packet data begins.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
@@ -1152,27 +1301,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, i40e_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = i40e_rx_offset(rx_ring);
+
+ /* initialize pagecnt_bias to 1 representing we fully own page */
+ bi->pagecnt_bias = 1;
return true;
}
@@ -1219,6 +1374,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -1259,8 +1420,6 @@ no_buffers:
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
@@ -1422,12 +1581,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
@@ -1472,7 +1631,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
@@ -1493,8 +1655,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
- * @page: page address from rx_buffer
- * @truesize: actual size of the buffer in this page
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -1517,13 +1677,10 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
- struct page *page,
- const unsigned int truesize)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
-#endif
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page)))
@@ -1531,21 +1688,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
+#define I40E_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET)
return false;
#endif
- /* Inc ref count on page before passing it up to the stack */
- get_page(page);
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
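
The refcount arithmetic above is easier to follow with the invariant spelled out (a restatement, not driver code): the driver pre-charges the page refcount and tracks its own share in pagecnt_bias, so stack-held references can be counted without an atomic on the hot path:

	/* stack_refs = page_count(page) - rx_buffer->pagecnt_bias
	 *
	 * Reuse is allowed only while stack_refs <= 1, i.e. the driver is
	 * in effect the sole owner.  page_ref_add(page, USHRT_MAX) above
	 * restocks the pre-charge in bulk, so the fast path only ever
	 * decrements the non-atomic pagecnt_bias.
	 */
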
@@ -1554,145 +1713,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
**/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = I40E_RXBUFFER_2048;
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif
- unsigned int pull_len;
-
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
-
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= I40E_RX_HDR_SIZE) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is reusable, we can reuse buffer as-is */
- if (likely(i40e_page_is_reusable(page)))
- return true;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
+ /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
- /* we need the header to contain the greater of either
- * ETH_HLEN or 60 bytes if the skb->len is less than
- * 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+ const unsigned int size)
+{
+ struct i40e_rx_buffer *rx_buffer;
- /* align pull length to size of long to optimize
- * memcpy performance
- */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
- /* update all of the pointers */
- va += pull_len;
- size -= pull_len;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buffer->pagecnt_bias--;
- return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
+ return rx_buffer;
}
/**
- * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
*/
-static inline
-struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
{
- u64 local_status_error_len =
- le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size =
- (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- struct i40e_rx_buffer *rx_buffer;
- struct page *page;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > I40E_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset + headlen,
+ size, truesize);
- if (likely(!skb)) {
- void *page_addr = page_address(page) + rx_buffer->page_offset;
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ /* buffer is unused, reset bias back to rx_buffer */
+ rx_buffer->pagecnt_bias++;
+ }
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ return skb;
+}
+
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(va - I40E_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- return NULL;
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, I40E_SKB_PAD);
+ __skb_put(skb, size);
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
+ return skb;
+}
- /* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
rx_buffer->page = NULL;
-
- return skb;
}
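
Taken together, i40e_get_rx_buffer(), i40e_construct_skb()/i40e_build_skb() and i40e_put_rx_buffer() replace the old monolithic i40e_fetch_rx_buffer(). Eligibility for the zero-copy build path is a per-ring flag; a hypothetical opt-in at ring-setup time (the real call site and its exact condition live in i40e_main.c, outside this excerpt) using the helpers added to i40e_txrx.h below:

	/* enable the in-place build_skb path only when the padded 2K
	 * layout fits; see I40E_2K_TOO_SMALL_WITH_PADDING below
	 */
	if (!I40E_2K_TOO_SMALL_WITH_PADDING)
		set_ring_build_skb_enabled(rx_ring);
	else
		clear_ring_build_skb_enabled(rx_ring);
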
/**
@@ -1718,11 +1933,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
prefetch(I40E_RX_DESC(rx_ring, ntc));
-#define staterrlen rx_desc->wb.qword1.status_error_len
- if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- return true;
- }
/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
@@ -1753,7 +1963,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < budget)) {
+ struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1770,22 +1982,44 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ * hardware wrote DD then the length will be non-zero
*/
- if (!i40e_test_staterr(rx_desc,
- BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
*/
dma_rmb();
- skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
- if (!skb)
+ if (unlikely(i40e_rx_is_programming_status(qword))) {
+ i40e_clean_programming_status(rx_ring, rx_desc, qword);
+ continue;
+ }
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
break;
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, rx_buffer, size);
+ else
+ skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
@@ -1798,6 +2032,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
@@ -1816,18 +2051,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-#ifdef I40E_FCOE
- if (unlikely(
- i40e_rx_is_fcoe(rx_ptype) &&
- !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
- dev_kfree_skb_any(skb);
- continue;
- }
-#endif
-
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
@@ -1944,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
@@ -1973,13 +2200,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
- if (test_bit(__I40E_DOWN, &vsi->state)) {
+ if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
- /* Clear hung_detected bit */
- clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2079,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
- if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
return;
/* if sampling is disabled do nothing */
@@ -2113,10 +2338,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
- if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
return;
- if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2178,8 +2402,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -2200,15 +2423,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
**/
-#ifdef I40E_FCOE
-inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
-#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
-#endif
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -2409,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
return 0;
if (pf->ptp_tx &&
- !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
+ !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
pf->ptp_tx_skb = skb_get(skb);
} else {
@@ -2716,15 +2933,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-#ifdef I40E_FCOE
-inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#endif
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -2925,6 +3136,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* prefetch the data, we'll need it later */
prefetch(skb->data);
+ i40e_trace(xmit_frame_ring, skb, tx_ring);
+
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) {
@@ -3003,6 +3216,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
+ i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f80979025c01..f5de51124cae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -117,10 +117,9 @@ enum i40e_dyn_idx_t {
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
+#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
-#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096 4096
-#define I40E_RXBUFFER_8192 8192
+#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -133,6 +132,61 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
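
These attributes shift cache maintenance to the driver: DMA_ATTR_SKIP_CPU_SYNC suppresses the implicit sync at map/unmap time, and DMA_ATTR_WEAK_ORDERING permits relaxed PCIe ordering where the platform supports it. A sketch of the resulting ownership hand-off, using the calls the patch adds in i40e_txrx.c:

	dma_addr_t dma;

	/* map once; synchronization is now the driver's job */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);

	/* hand the buffer to hardware */
	dma_sync_single_range_for_device(rx_ring->dev, dma,
					 rx_buffer->page_offset,
					 rx_ring->rx_buf_len, DMA_FROM_DEVICE);

	/* ... and reclaim it for the CPU after write-back */
	dma_sync_single_range_for_cpu(rx_ring->dev, dma,
				      rx_buffer->page_offset,
				      rx_ring->rx_buf_len, DMA_FROM_DEVICE);
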
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = I40E_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
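
A worked example, with the caveat that the result depends on the kernel config; the figures below assume PAGE_SIZE == 4096, NET_IP_ALIGN == 2 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320:

	/* rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN   = 1534
	 * page_size  = ALIGN(1534, PAGE_SIZE / 2)          = 2048
	 * pad_size   = SKB_WITH_OVERHEAD(2048) - 1534
	 *            = (2048 - 320) - 1534                 = 194
	 *
	 * i.e. roughly 194 bytes of headroom in each 2K half-page
	 * buffer (192 on arches where NET_IP_ALIGN == 0).
	 */
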
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -255,7 +309,12 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
@@ -269,7 +328,6 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
- u64 tx_lost_interrupt;
};
struct i40e_rx_queue_stats {
@@ -335,7 +393,8 @@ struct i40e_ring {
u8 packet_stride;
u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */
struct i40e_queue_stats stats;
@@ -363,6 +422,21 @@ struct i40e_ring {
*/
} ____cacheline_internodealigned_in_smp;
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
@@ -384,6 +458,17 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
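
Concretely, assuming 4K pages: a 2K buffer leaves the allocation at order 0, while the padded 3K layout no longer fits in half a page and bumps it to order 1, each page still being split into two buffers:

	/* PAGE_SIZE == 4096 (illustrative):
	 *   rx_buf_len == 2048 -> i40e_rx_pg_order() == 0, pg_size == 4096
	 *   rx_buf_len == 3072 -> i40e_rx_pg_order() == 1, pg_size == 8192
	 */
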
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -393,15 +478,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
-#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset);
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring, u32 *flags);
-#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
@@ -483,16 +561,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
}
/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
* txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 939f9fdc8f85..3a18ed13edc4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -78,6 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_IWARP = 0x00F00000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -1213,25 +1214,6 @@ struct i40e_veb_tc_stats {
u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
};
-#ifdef I40E_FCOE
-/* Statistics collected per function for FCoE */
-struct i40e_fcoe_stats {
- u64 rx_fcoe_packets; /* fcoeprc */
- u64 rx_fcoe_dwords; /* focedwrc */
- u64 rx_fcoe_dropped; /* fcoerpdc */
- u64 tx_fcoe_packets; /* fcoeptc */
- u64 tx_fcoe_dwords; /* focedwtc */
- u64 fcoe_bad_fccrc; /* fcoecrc */
- u64 fcoe_last_error; /* fcoelast */
- u64 fcoe_ddp_count; /* fcoeddpc */
-};
-
-/* offset to per function FCoE statistics block */
-#define I40E_FCOE_VF_STAT_OFFSET 0
-#define I40E_FCOE_PF_STAT_OFFSET 128
-#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
-
-#endif
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1319,125 +1301,6 @@ struct i40e_hw_port_stats {
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
-#ifdef I40E_FCOE
-/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
-
-enum i40E_fcoe_tx_ctx_desc_cmd_bits {
- I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
- I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
- I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
- I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
-};
-
-/* FCoE DDP Context descriptor */
-struct i40e_fcoe_ddp_context_desc {
- __le64 rsvd;
- __le64 type_cmd_foff_lsize;
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
- I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
-#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
- I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
-
-enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
- I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
- I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
- I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
- I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
-
-/* FCoE DDP/DWO Queue Context descriptor */
-struct i40e_fcoe_queue_context_desc {
- __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
- __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
- I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
- I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
- I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
- I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-enum i40e_fcoe_queue_ctx_desc_tph_bits {
- I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
- I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
- I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
-
-/* FCoE DDP/DWO Filter Context descriptor */
-struct i40e_fcoe_filter_context_desc {
- __le32 param;
- __le16 seqn;
-
- /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
- __le16 rsvd_dmaindx;
-
- /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
- __le64 flags_rsvd_lanq;
-};
-
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
- I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
-
-enum i40e_fcoe_filter_ctx_desc_flags_bits {
- I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
- I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
- I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
- I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
- I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
- I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
-};
-
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
- I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
- I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
- I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
-
-#endif /* I40E_FCOE */
enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2,
@@ -1600,4 +1463,83 @@ struct i40e_lldp_variables {
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for PPP */
+struct i40e_ppp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_PPP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ppp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ppp_version version;
+ u32 size;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ u32 track_id;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ char name[I40E_PPP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ppp_version version;
+ u8 op;
+#define I40E_PPP_ADD_TRACKID 0x01
+#define I40E_PPP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_PPP_NAME_SIZE];
+};
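A structural note on these definitions: the trailing one-element arrays (segment_offset[1], device_table[1], section_offset[1]) are the pre-C99 flexible-array idiom. The structs are overlaid on a larger firmware buffer, and the *_count fields carry the real lengths, so callers index past the declared bound, e.g. (sketch mirroring the lookup helper added later in this patch):

	for (i = 0; i < pkg_hdr->segment_count; i++)
		seg = (struct i40e_generic_seg_header *)
		      ((u8 *)pkg_hdr + pkg_hdr->segment_offset[i]);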
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 974ba2baf6ea..8552192a5bde 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -163,7 +163,8 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78460c52b7c4..95c23fbaa211 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -50,8 +50,8 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
continue;
/* Ignore return value on purpose - a given VF may fail, but
@@ -137,8 +137,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
return;
/* verify if the VF is in either init or active before proceeding */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
return;
abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
@@ -702,10 +702,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
- (u32)hena);
- i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
}
/* program mac filter */
@@ -811,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
u32 reg_idx, reg;
int i, msix_vf;
+ /* Start by disabling VF's configuration API to prevent the OS from
+ * accessing the VF's VSI after it's freed / invalidated.
+ */
+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_idx) {
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
@@ -850,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
/* reset some of the state variables keeping track of the resources */
vf->num_queue_pairs = 0;
vf->vf_states = 0;
- clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
}
/**
@@ -882,7 +884,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
vf->num_queue_pairs = total_queue_pairs;
/* VF is now completely initialized */
- set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+ set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
error_alloc:
if (ret)
@@ -921,25 +923,30 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
}
/**
- * i40e_reset_vf
+ * i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * reset the VF
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * cleaning up and restoring VF functionality.
**/
-void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
- bool rsd = false;
- int i;
-
- if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
- return;
/* warn the VF */
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+ * It's normally disabled in i40e_free_vf_res(), but it's safer
+	 * to do it earlier so that any VF config functions still running at
+	 * this point have time to finish.
+ */
+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -960,6 +967,79 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (i40e_quiesce_vf_pci(vf))
dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
vf->vf_id);
+}
+
+/**
+ * i40e_cleanup_reset_vf
+ * @vf: pointer to the VF structure
+ *
+ * Clean up a VF after the hardware reset is finished. Expects the caller to
+ * have verified that the reset finished properly, and to have ensured that
+ * the minimum amount of wait time has passed.
+ **/
+static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ /* free VF resources to begin resetting the VSI state */
+ i40e_free_vf_res(vf);
+
+ /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in i40e_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+	 * harmless, but it gets logged in the guest OS kernel log, so it is
+	 * best avoided.
+ */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!i40e_alloc_vf_res(vf)) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ /* Do not notify the client during VF init */
+ if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
+ &vf->vf_states))
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the VF
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ return;
+
+ i40e_trigger_vf_reset(vf, flr);
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
@@ -984,35 +1064,116 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (!rsd)
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ usleep_range(10000, 20000);
- /* On initial reset, we won't have any queues */
- if (vf->lan_vsi_idx == 0)
- goto complete_reset;
+ /* On initial reset, we don't have any queues to disable */
+ if (vf->lan_vsi_idx != 0)
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
- i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
-complete_reset:
- /* reallocate VF resources to reset the VSI state */
- i40e_free_vf_res(vf);
- if (!i40e_alloc_vf_res(vf)) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- i40e_enable_vf_mappings(vf);
- set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
- clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
- /* Do not notify the client during VF init */
- if (vf->pf->num_alloc_vfs)
- i40e_notify_client_of_vf_reset(pf, abs_vf_id);
- vf->num_vlan = 0;
+ i40e_cleanup_reset_vf(vf);
+
+ i40e_flush(hw);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
+}
+
+/**
+ * i40e_reset_all_vfs
+ * @pf: pointer to the PF structure
+ * @flr: VFLR was issued or not
+ *
+ * Reset all allocated VFs in one go. First, tell the hardware to reset each
+ * VF, then do all the waiting in one chunk, and finally finish restoring each
+ * VF after the wait. This is useful during PF routines which need to reset
+ * all VFs, since otherwise they would have to be reset one at a time.
+ **/
+void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int i, v;
+ u32 reg;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ return;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once that VF has
+	 * the VFs using a simple iterator that advances once each VF has
+ */
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+		/* Check each VF in sequence, beginning with the VF that
+		 * failed the previous check.
+ */
+ while (v < pf->num_alloc_vfs) {
+ vf = &pf->vf[v];
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
}
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
+ if (flr)
+ usleep_range(10000, 20000);
+
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ pf->vf[v].vf_id);
+ usleep_range(10000, 20000);
+
+ /* Begin disabling all the rings associated with VFs, but do not wait
+ * between each VF.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* On initial reset, we don't have any queues to disable */
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+ /* Now that we've notified HW to disable all of the VF rings, wait
+ * until they finish.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* On initial reset, we don't have any queues to disable */
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+	/* HW may need up to 50ms to finish disabling the RX queues. We
+ * minimize the wait by delaying only once for all VFs.
+ */
+ mdelay(50);
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_cleanup_reset_vf(&pf->vf[v]);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, &pf->state);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
}
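Schematically, the batched flow this function implements (condensed from the code above):

	/* 1. for each VF:  i40e_trigger_vf_reset()
	 * 2. one shared VPGEN_VFRSTAT polling loop, at most 10 sleeps total
	 *    instead of a full polling loop per VF
	 * 3. for each VF:  i40e_vsi_stop_rings_no_wait()
	 * 4. for each VF:  i40e_vsi_wait_queues_disabled()
	 * 5. a single mdelay(50) covering Rx queue disable for all VFs
	 * 6. for each VF:  i40e_cleanup_reset_vf()
	 */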
/**
@@ -1029,13 +1190,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
- while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
+ while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
i40e_notify_client_of_vf_enable(pf, 0);
- for (i = 0; i < pf->num_alloc_vfs; i++)
- if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+ /* Amortize wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ continue;
+
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ }
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ continue;
+
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ }
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -1046,13 +1219,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
- msleep(20); /* let any messages in transit get finished up */
-
/* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
- if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
i40e_disable_vf_mappings(&pf->vf[i]);
@@ -1075,7 +1246,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
- clear_bit(__I40E_VF_DISABLE, &pf->state);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
}
#ifdef CONFIG_PCI_IOV
@@ -1120,12 +1291,15 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
- /* VF resources get allocated during reset */
- i40e_reset_vf(&vfs[i], false);
+
+ set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
}
pf->num_alloc_vfs = num_alloc_vfs;
+ /* VF resources get allocated during reset */
+ i40e_reset_all_vfs(pf, false);
+
i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
err_alloc:
@@ -1152,7 +1326,7 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
- if (test_bit(__I40E_TESTING, &pf->state)) {
+ if (test_bit(__I40E_TESTING, pf->state)) {
dev_warn(&pdev->dev,
"Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
err = -EPERM;
@@ -1258,7 +1432,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
"Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
} else {
vf->num_valid_msgs++;
@@ -1333,7 +1507,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
int len = 0;
int ret;
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -1359,10 +1533,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
- if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+ if (i40e_vf_client_capable(pf, vf->vf_id) &&
(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
- set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
+ set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
}
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -1383,6 +1557,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
@@ -1416,7 +1597,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
- set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
err:
/* send the response back to the VF */
@@ -1439,7 +1620,7 @@ err:
**/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
i40e_reset_vf(vf, false);
}
@@ -1487,7 +1668,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
!vsi) {
aq_ret = I40E_ERR_PARAM;
@@ -1548,9 +1729,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
if (allmulti)
- set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
else
- clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
}
if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
@@ -1599,9 +1780,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
if (alluni)
- set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
else
- clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
error_param:
@@ -1630,7 +1811,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1687,7 +1868,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
unsigned long tempmap;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1747,7 +1928,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1786,7 +1967,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1828,7 +2009,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
memset(&stats, 0, sizeof(struct i40e_eth_stats));
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1853,7 +2034,7 @@ error_param:
}
/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
@@ -1915,7 +2096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1984,7 +2165,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2050,7 +2231,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
goto error_param;
}
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -2077,12 +2258,12 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!ret)
vf->num_vlan++;
- if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
true,
vfl->vlan_id[i],
NULL);
- if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
true,
vfl->vlan_id[i],
@@ -2117,7 +2298,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -2140,12 +2321,12 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
vf->num_vlan--;
- if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
NULL);
- if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
@@ -2171,8 +2352,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2202,8 +2383,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2240,7 +2421,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vrk->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
(vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2272,7 +2453,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vrl->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
(vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2302,7 +2483,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int len = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2339,7 +2520,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2369,7 +2550,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int valid_len = 0;
/* Check if VF is disabled. */
- if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
return I40E_ERR_PARAM;
/* Validate message length. */
@@ -2637,7 +2818,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
struct i40e_vf *vf;
int vf_id;
- if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
return 0;
/* Re-enable the VFLR interrupt cause here, before looking for which
@@ -2650,7 +2831,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
- clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
@@ -2693,7 +2874,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -2782,7 +2963,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -2914,7 +3095,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -2995,7 +3176,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &(pf->vf[vf_id]);
/* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -3114,7 +3295,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
}
vf = &(pf->vf[vf_id]);
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 4012d069939a..20d7c8160e9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -56,13 +56,14 @@ enum i40e_queue_ctrl {
/* VF states */
enum i40e_vf_states {
- I40E_VF_STAT_INIT = 0,
- I40E_VF_STAT_ACTIVE,
- I40E_VF_STAT_IWARPENA,
- I40E_VF_STAT_FCOEENA,
- I40E_VF_STAT_DISABLED,
- I40E_VF_STAT_MC_PROMISC,
- I40E_VF_STAT_UC_PROMISC,
+ I40E_VF_STATE_INIT = 0,
+ I40E_VF_STATE_ACTIVE,
+ I40E_VF_STATE_IWARPENA,
+ I40E_VF_STATE_FCOEENA,
+ I40E_VF_STATE_DISABLED,
+ I40E_VF_STATE_MC_PROMISC,
+ I40E_VF_STATE_UC_PROMISC,
+ I40E_VF_STATE_PRE_ENABLE,
};
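Cross-reference for the new I40E_VF_STATE_PRE_ENABLE bit: i40e_alloc_vfs() (earlier in this patch) sets it on each VF before calling i40e_reset_all_vfs(), and i40e_cleanup_reset_vf() tests-and-clears it to gate the client notification, replacing the old pf->num_alloc_vfs check:

	set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);	/* alloc path */
	...
	if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, &vf->vf_states))
		i40e_notify_client_of_vf_reset(pf, abs_vf_id);	/* cleanup */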
/* VF capabilities */
@@ -87,7 +88,6 @@ struct i40e_vf {
u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr;
- struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
bool trusted;
@@ -125,6 +125,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 3a423836a565..a393f4a07f06 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -29,8 +29,11 @@
#
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_I40EVF) += i40evf.o
i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
- i40e_txrx.o i40e_common.o i40e_adminq.o
+ i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 96385156b824..8b0d4b255dea 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -797,8 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (i40evf_asq_done(hw))
break;
- usleep_range(1000, 2000);
- total_delay++;
+ udelay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 1f9b3b5d946d..e0bfaa3d4a21 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
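A connecting note for this define and the i40e_adminq.c hunk above: i40evf_asq_send_command() now busy-waits in 50 us steps (udelay(50); total_delay += 50) instead of sleeping roughly 1 ms per iteration, so the timeout constant is re-expressed in microseconds. The overall budget is unchanged:

	250000 us / 50 us per poll = 5000 iterations, i.e. the same 250 ms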
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index eeb9864bc5b1..91d8786d386d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,10 +185,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Pipeline Personalization Profile */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -558,6 +571,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -640,6 +703,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -691,6 +756,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1364,6 +1430,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Pipeline Personalization Profile */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ppp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_PPP_GET_CONF 0x1
+#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1839,11 +1935,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 89dfdbca13db..43f10761f4ba 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -958,7 +958,9 @@ u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
@@ -1019,7 +1021,9 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
@@ -1127,3 +1131,215 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
+
+/**
+ * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code
+i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+		    u16 buff_size, u32 track_id,
+		    u32 *error_offset, u32 *error_info,
+		    struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ppp_resp *resp;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->profile_track_id = cpu_to_le32(track_id);
+
+ status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code
+i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+		       u16 buff_size, u8 flags,
+		       struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40evf_find_segment_in_package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40evf_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
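A caller-side sketch of the lookup (illustration only; the firmware handle, the cast, and the error value are assumptions rather than code from this patch):

	const struct firmware *fw;	/* assumed: from request_firmware() */
	struct i40e_package_header *pkg;
	struct i40e_profile_segment *profile;

	pkg = (struct i40e_package_header *)fw->data;
	profile = (struct i40e_profile_segment *)
		  i40evf_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!profile)
		return I40E_ERR_CONFIG;	/* no i40e segment in this image */
	status = i40evf_write_profile(hw, profile, track_id);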
+
+/**
+ * i40evf_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40evf_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40evf_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_PPP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+
+ status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
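And a matching sketch for registering a written profile (the buffer sizing follows the data_end computation above; the kzalloc scaffolding is my assumption):

	u8 *sec = kzalloc(sizeof(struct i40e_profile_section_header) +
			  sizeof(struct i40e_profile_info), GFP_KERNEL);

	if (!sec)
		return I40E_ERR_NO_MEMORY;
	status = i40evf_add_pinfo_to_list(hw, profile, sec, track_id);
	kfree(sec);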
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index ba6c6bda0e22..741223d5d809 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -122,4 +122,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40evf_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40evf_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
new file mode 100644
index 000000000000..9a5100b2b7c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
@@ -0,0 +1,229 @@
+/*******************************************************************************
+ *
+ * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver
+ * Copyright(c) 2013 - 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40evf will be "i40evf".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40evf
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/**
+ * i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace points should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
+
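To see the name pasting in action: with TRACE_SYSTEM set to i40evf above, a shared-code call such as

	i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);

expands to trace_i40evf_clean_tx_irq(tx_ring, tx_desc, tx_buf) in this driver, while the identical source line built into the PF driver resolves to the trace_i40e_ variant. The i40e_txrx.c hunk later in this patch relies on exactly this.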
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+ i40evf_tx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf),
+
+ /* The convention here is to make the first fields in the
+	 * TP_STRUCT match the TP_PROTO exactly. This enables the args
+	 * struct generated by the tplist tool (from the
+ * bcc-tools package) to be used for those fields. To access
+ * fields other than the tracepoint args will require the
+ * tplist output to be adjusted.
+ */
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, buf)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p buf %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+ i40evf_tx_template, i40evf_clean_tx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+ i40evf_tx_template, i40evf_clean_tx_irq_unmap,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+ i40evf_rx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, skb)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p skb %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+ i40evf_rx_template, i40evf_clean_rx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+ i40evf_rx_template, i40evf_clean_rx_irq_rx,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+ i40evf_xmit_template,
+
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring),
+
+ TP_STRUCT__entry(
+ __field(void*, skb)
+ __field(void*, ring)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->ring = ring;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s skb: %p ring: %p",
+ __get_str(devname), __entry->skb,
+ __entry->ring)
+);
+
+DEFINE_EVENT(
+ i40evf_xmit_template, i40evf_xmit_frame_ring,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+ i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+/* Events unique to the VF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
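
As with any tracepoint header living outside include/trace, exactly one
compilation unit in the module must instantiate the tracepoints by
defining CREATE_TRACE_POINTS before including this header; all other
users include it plainly. A minimal sketch of that convention (the file
placement is an assumption, not shown in this hunk):

	/* in a single .c file of the i40evf module */
	#define CREATE_TRACE_POINTS
	#include "i40e_trace.h"

	/* in every other consumer */
	#include "i40e_trace.h"
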
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c91fcf43ccbc..dfe241a12ad0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -28,6 +28,7 @@
#include <net/busy_poll.h>
#include "i40evf.h"
+#include "i40e_trace.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
@@ -137,10 +138,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
- if (!in_sw)
- head = i40e_get_head(ring);
- else
- head = ring->next_to_clean;
+ head = ring->next_to_clean;
tail = readl(ring->tail);
if (head != tail)
@@ -165,7 +163,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
- struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = vsi->work_limit;
@@ -174,8 +171,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
- tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
-
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -186,8 +181,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* we have caught up to head, no work left to do */
- if (tx_head == tx_desc)
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break;
/* clear next_to_watch to prevent false hangs */
@@ -212,6 +209,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ i40e_trace(clean_tx_irq_unmap,
+ tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
@@ -267,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &vsi->state) &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &vsi->state)) {
+ !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -464,10 +463,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
- /* add u32 for head writeback, align after this takes care of
- * guaranteeing this is at least one cache line in size
- */
- tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -493,7 +488,6 @@ err:
**/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -513,8 +507,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_bi->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL;
rx_bi->page_offset = 0;
@@ -615,6 +623,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the expected offset into the data buffer for the given ring.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
@@ -635,27 +654,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, i40e_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = i40e_rx_offset(rx_ring);
+
+ /* initialize pagecnt_bias to 1 representing we fully own page */
+ bi->pagecnt_bias = 1;
return true;
}
@@ -702,6 +727,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -742,8 +773,6 @@ no_buffers:
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
@@ -806,13 +835,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* If there is an outer header present that might contain a checksum
- * we need to bump the checksum level by 1 to reflect the fact that
- * we are indicating we validated the inner checksum.
- */
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
- skb->csum_level = 1;
-
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
@@ -895,12 +917,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
{
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
@@ -945,7 +967,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
@@ -966,8 +991,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
- * @page: page address from rx_buffer
- * @truesize: actual size of the buffer in this page
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -990,13 +1013,10 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
- struct page *page,
- const unsigned int truesize)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
-#endif
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page)))
@@ -1004,21 +1024,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
+#define I40E_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET)
return false;
#endif
- /* Inc ref count on page before passing it up to the stack */
- get_page(page);
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
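
The bias bookkeeping above replaces a get_page()/put_page() atomic pair
per received frame with a local counter. A short worked example of the
invariant, assuming a freshly allocated buffer:

	/* after i40e_alloc_mapped_page():  page_count == 1, pagecnt_bias == 1
	 * i40e_get_rx_buffer():            pagecnt_bias-- (one reference is
	 *                                  handed to the stack with the frag)
	 * reuse check:                     page_count - pagecnt_bias == 1,
	 *                                  only the just-received frag is
	 *                                  outstanding, so the page is reusable
	 * bias drained to 0:               page_ref_add(page, USHRT_MAX)
	 *                                  restocks the driver's references in
	 *                                  one atomic op, and pagecnt_bias is
	 *                                  reset to USHRT_MAX to match
	 */
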
@@ -1027,145 +1049,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
**/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = I40E_RXBUFFER_2048;
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif
- unsigned int pull_len;
-
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= I40E_RX_HDR_SIZE) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is reusable, we can reuse buffer as-is */
- if (likely(i40e_page_is_reusable(page)))
- return true;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
+ /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
- /* we need the header to contain the greater of either
- * ETH_HLEN or 60 bytes if the skb->len is less than
- * 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+ const unsigned int size)
+{
+ struct i40e_rx_buffer *rx_buffer;
- /* align pull length to size of long to optimize
- * memcpy performance
- */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
- /* update all of the pointers */
- va += pull_len;
- size -= pull_len;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buffer->pagecnt_bias--;
- return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
+ return rx_buffer;
}
/**
- * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
*/
-static inline
-struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
{
- u64 local_status_error_len =
- le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size =
- (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- struct i40e_rx_buffer *rx_buffer;
- struct page *page;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > I40E_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset + headlen,
+ size, truesize);
+
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ /* buffer is unused, reset bias back to rx_buffer */
+ rx_buffer->pagecnt_bias++;
+ }
+
+ return skb;
+}
- if (likely(!skb)) {
- void *page_addr = page_address(page) + rx_buffer->page_offset;
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(va - I40E_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- return NULL;
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, I40E_SKB_PAD);
+ __skb_put(skb, size);
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
+ return skb;
+}
- /* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
rx_buffer->page = NULL;
-
- return skb;
}
/**
@@ -1221,7 +1299,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < budget)) {
+ struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1238,22 +1318,40 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ * hardware wrote DD then the length will be non-zero
*/
- if (!i40e_test_staterr(rx_desc,
- BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
*/
dma_rmb();
- skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
- if (!skb)
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
break;
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, rx_buffer, size);
+ else
+ skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
@@ -1266,6 +1364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
@@ -1288,6 +1387,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
@@ -1408,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
@@ -1437,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
- if (test_bit(__I40E_DOWN, &vsi->state)) {
+ if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
@@ -1980,7 +2080,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2016,7 +2115,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
- desc_count++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2038,7 +2136,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
- desc_count++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2064,46 +2161,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
- /* write last descriptor with EOP bit */
- td_cmd |= I40E_TX_DESC_CMD_EOP;
-
- /* We can OR these values together as they both are checked against
- * 4 below and at this point desc_count will be used as a boolean value
- * after this if/else block.
- */
- desc_count |= ++tx_ring->packet_stride;
-
- /* Algorithm to optimize tail and RS bit setting:
- * if queue is stopped
- * mark RS bit
- * reset packet counter
- * else if xmit_more is supported and is true
- * advance packet counter to 4
- * reset desc_count to 0
- *
- * if desc_count >= 4
- * mark RS bit
- * reset packet counter
- * if desc_count > 0
- * update tail
- *
- * Note: If there are less than 4 descriptors
- * pending and interrupts were disabled the service task will
- * trigger a force WB.
- */
- if (netif_xmit_stopped(txring_txq(tx_ring))) {
- goto do_rs;
- } else if (skb->xmit_more) {
- /* set stride to arm on next packet and reset desc_count */
- tx_ring->packet_stride = WB_STRIDE;
- desc_count = 0;
- } else if (desc_count >= WB_STRIDE) {
-do_rs:
- /* write last descriptor with RS bit set */
- td_cmd |= I40E_TX_DESC_CMD_RS;
- tx_ring->packet_stride = 0;
- }
-
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= I40E_TXD_CMD;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
@@ -2119,7 +2178,7 @@ do_rs:
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (desc_count) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
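
With head writeback gone, completion is detected purely from the DD bit,
so every last descriptor now carries RS|EOP (I40E_TXD_CMD) and xmit_more
batching reduces to deferring the doorbell. A worked example of the tail
write above for a three-frame batch:

	/* frame 1: skb->xmit_more == true  -> descriptors queued, no doorbell
	 * frame 2: skb->xmit_more == true  -> descriptors queued, no doorbell
	 * frame 3: skb->xmit_more == false -> writel(i, tx_ring->tail) once
	 *
	 * A stopped queue (netif_xmit_stopped()) always flushes immediately.
	 */
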
@@ -2170,6 +2229,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* prefetch the data, we'll need it later */
prefetch(skb->data);
+ i40e_trace(xmit_frame_ring, skb, tx_ring);
+
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) {
@@ -2237,6 +2298,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
+ i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8274ba68bd32..901282c87cf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -104,10 +104,9 @@ enum i40e_dyn_idx_t {
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
+#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
-#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096 4096
-#define I40E_RXBUFFER_8192 8192
+#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -120,6 +119,61 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
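
DMA_ATTR_SKIP_CPU_SYNC shifts cache-coherence responsibility from the
map/unmap calls to the driver, which is what allows one mapping to live
across many reuses of the same page. The driver must then bracket every
ownership hand-off itself, as the Rx paths above do. In sketch form,
with names abbreviated from the real call sites:

	/* map once per page, with no implicit sync */
	dma = dma_map_page_attrs(dev, page, 0, pg_size,
				 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);

	/* before the device may write into the buffer */
	dma_sync_single_range_for_device(dev, dma, offset, len,
					 DMA_FROM_DEVICE);

	/* before the CPU reads what the device wrote */
	dma_sync_single_range_for_cpu(dev, dma, offset, len,
				      DMA_FROM_DEVICE);

	/* unmap only when the page finally leaves the ring */
	dma_unmap_page_attrs(dev, dma, pg_size,
			     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
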
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = I40E_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
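
To make the padding math concrete, here is one worked configuration:
4K pages, 64-byte cache lines (so NET_SKB_PAD == 64), NET_IP_ALIGN == 0,
and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320. All of these
vary by arch and kernel config, so the numbers are illustrative only:

	/* SKB_WITH_OVERHEAD(2048) == 2048 - 320 == 1728
	 * NET_SKB_PAD + I40E_RXBUFFER_1536 == 64 + 1536 == 1600 <= 1728
	 *   -> I40E_2K_TOO_SMALL_WITH_PADDING is false
	 *
	 * i40e_skb_pad():
	 *   rx_buf_len = 1536 - 0 = 1536
	 *   i40e_compute_pad(1536):
	 *     page_size = ALIGN(1536, 2048) = 2048
	 *     pad = SKB_WITH_OVERHEAD(2048) - 1536 = 192
	 *   -> I40E_SKB_PAD == 192 bytes of headroom per buffer
	 */
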
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -241,7 +295,12 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
@@ -321,7 +380,8 @@ struct i40e_ring {
u8 packet_stride;
u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */
struct i40e_queue_stats stats;
@@ -349,6 +409,21 @@ struct i40e_ring {
*/
} ____cacheline_internodealigned_in_smp;
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
@@ -370,6 +445,17 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
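
For example, assuming PAGE_SIZE == 4096: a 3072-byte rx_buf_len no
longer fits two buffers in one page, so the ring moves to order-1 pages
and keeps the half-page split:

	/* rx_buf_len == 2048: 2048 <= PAGE_SIZE/2 -> order 0, pg_size 4096,
	 *                     two 2K halves per page (truesize == 2048)
	 * rx_buf_len == 3072: 3072 >  PAGE_SIZE/2 -> order 1, pg_size 8192,
	 *                     two 4K halves per page (truesize == 4096)
	 */
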
bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -385,20 +471,6 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: Tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -460,19 +532,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
-
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
/**
- * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 16bb88084bb9..bde7f24af1c6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -78,6 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -1396,4 +1397,83 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
+
+/* Version format for PPP */
+struct i40e_ppp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_PPP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ppp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ppp_version version;
+ u32 size;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ u32 track_id;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ char name[I40E_PPP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ppp_version version;
+ u8 op;
+#define I40E_PPP_ADD_TRACKID 0x01
+#define I40E_PPP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_PPP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index d38a2b2aea2b..c5ad0388c3d5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,9 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -161,7 +163,8 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -393,6 +396,37 @@ struct i40e_virtchnl_pf_event {
int severity;
};
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and a CEQ attached to it. Since there is
+ * a single AEQ per VF IWARP instance, most vectors will have an
+ * INVALID_IDX for aeq and a valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
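
qv_info[1] is the old-style flexible array idiom: a message carrying N
entries is sized by adding N - 1 extra elements, since one element is
already counted by sizeof(). The client code later in this patch sizes
its virtchnl message exactly this way:

	u32 msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
		       (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
			(num_vectors - 1));
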
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 00c42d803276..b8ada6d8d890 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -49,6 +49,13 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
+/* VSI state flags shared with common code */
+enum i40evf_vsi_state_t {
+ __I40E_VSI_DOWN,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_VSI_STATE_SIZE__,
+};
+
/* dummy struct to make common code less painful */
struct i40e_vsi {
struct i40evf_adapter *back;
@@ -56,10 +63,11 @@ struct i40e_vsi {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
int base_vector;
u16 work_limit;
u16 qs_handle;
+ void *priv; /* client driver data reference. */
};
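
The switch from a plain unsigned long to DECLARE_BITMAP() is what turns
the test_bit() call sites earlier in the patch from &vsi->state into
vsi->state: the bitmap is already an array. In expanded form:

	/* DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__) expands to:
	 *
	 *   unsigned long state[BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__)];
	 *
	 * so vsi->state decays to the unsigned long * that test_bit(),
	 * set_bit() and clear_bit() expect, with room to grow beyond
	 * BITS_PER_LONG flags.
	 */
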
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -71,10 +79,6 @@ struct i40e_vsi {
#define I40EVF_MAX_RXD 4096
#define I40EVF_MIN_RXD 64
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
-
-/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048 2048
-#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
#define I40EVF_AQ_LEN 32
#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
@@ -169,15 +173,15 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+ __I40EVF_IN_CLIENT_TASK,
};
-/* make common code happy */
-#define __I40E_DOWN __I40EVF_DOWN
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
struct work_struct reset_task;
struct work_struct adminq_task;
+ struct delayed_work client_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -195,15 +199,16 @@ struct i40evf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
+ int num_iwarp_msix;
+ int iwarp_base_vector;
u32 client_pending;
+ struct i40e_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IN_NETPOLL BIT(4)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
#define I40EVF_FLAG_RESET_PENDING BIT(9)
#define I40EVF_FLAG_RESET_NEEDED BIT(10)
@@ -211,15 +216,18 @@ struct i40evf_adapter {
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
-#define I40EVF_FLAG_PROMISC_ON BIT(15)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
+#define I40EVF_FLAG_PROMISC_ON BIT(18)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
+#define I40EVF_FLAG_LEGACY_RX BIT(20)
/* duplicates for common code */
-#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
+#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX
/* flags for admin queue service task */
u32 aq_required;
#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
@@ -246,7 +254,6 @@ struct i40evf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
struct i40e_hw hw; /* defined in i40e_type.h */
@@ -258,10 +265,11 @@ struct i40evf_adapter {
bool link_up;
enum i40e_aq_link_speed link_speed;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -292,6 +300,12 @@ struct i40evf_adapter {
/* Ethtool Private Flags */
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40evf_adapter *vf;
+};
+
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
extern const char i40evf_driver_version[];
@@ -337,4 +351,11 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
new file mode 100644
index 000000000000..ee737680a0e9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -0,0 +1,564 @@
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+ .virtchnl_send = i40evf_client_virtchnl_send,
+ .setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+ struct i40e_client_instance *cinst;
+
+ if (!vsi)
+ return;
+
+ cinst = vsi->back->cinst;
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->virtchnl_receive) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance virtchnl_receive function\n");
+ return;
+ }
+ cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
+ msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_params params;
+
+ if (!vsi)
+ return;
+
+ cinst = vsi->back->cinst;
+ memset(&params, 0, sizeof(params));
+ params.mtu = vsi->netdev->mtu;
+ params.link_up = vsi->back->link_up;
+ params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change function\n");
+ return;
+ }
+ cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+ &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+ int ret;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->open) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance open function\n");
+ return;
+ }
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+ ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ }
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ I40E_SUCCESS, NULL, 0, NULL);
+
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close function\n");
+ return;
+ }
+ cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+ struct i40e_client_instance *cinst = NULL;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ int i;
+
+ if (!vf_registered_client)
+ goto out;
+
+ if (adapter->cinst) {
+ cinst = adapter->cinst;
+ goto out;
+ }
+
+ cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+ if (!cinst)
+ goto out;
+
+ cinst->lan_info.vf = (void *)adapter;
+ cinst->lan_info.netdev = vsi->netdev;
+ cinst->lan_info.pcidev = adapter->pdev;
+ cinst->lan_info.fid = 0;
+ cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+ cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+ cinst->lan_info.ops = &i40evf_lan_ops;
+ cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+ cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+ cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+
+ cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_entries =
+ &adapter->msix_entries[adapter->iwarp_base_vector];
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ cinst->lan_info.params.qos.prio_qos[i].tc = 0;
+ cinst->lan_info.params.qos.prio_qos[i].qs_handle =
+ vsi->qs_handle;
+ }
+
+ mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+ cinst->client = vf_registered_client;
+ adapter->cinst = cinst;
+out:
+ return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+ kfree(adapter->cinst);
+ adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+ struct i40e_client *client = vf_registered_client;
+ struct i40e_client_instance *cinst;
+ int ret = 0;
+
+ if (adapter->state < __I40EVF_DOWN)
+ return;
+
+ /* first check client is registered */
+ if (!client)
+ return;
+
+ /* Add the client instance to the instance list */
+ cinst = i40evf_client_add_instance(adapter);
+ if (!cinst)
+ return;
+
+ dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+ client->name);
+
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ /* Send an Open request to the client */
+
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cinst->lan_info, client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cinst->state);
+ else
+ /* remove client instance */
+ i40evf_client_del_instance(adapter);
+ }
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->vf = adapter;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40evf_devices);
+ dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+
+	/* Since client registration may have happened before this device was
+	 * added, schedule a subtask to initialize the clients.
+ */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+
+out:
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_lan_del_device - removes a lan device from the device list
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ dev_info(&adapter->pdev->dev,
+ "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_release(struct i40e_client *client)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ cinst = adapter->cinst;
+ if (!cinst)
+ continue;
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (client->ops && client->ops->close)
+ client->ops->close(&cinst->lan_info, client,
+ false);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+
+ dev_warn(&adapter->pdev->dev,
+ "Client %s instance closed\n", client->name);
+ }
+ /* delete the client instance */
+ i40evf_client_del_instance(adapter);
+ dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
+ client->name);
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_prepare(struct i40e_client *client)
+{
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ /* Signal the watchdog to service the client */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_virtchnl_send - send a message to the PF instance
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
+ I40E_SUCCESS, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qv_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct i40evf_adapter *adapter = ldev->vf;
+ struct i40e_qv_info *qv_info;
+ i40e_status err;
+ u32 v_idx, i;
+ u32 msg_size;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ /* A quick check on whether the vectors belong to the client */
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if ((v_idx >=
+ (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+ (v_idx < adapter->iwarp_base_vector))
+ return -EINVAL;
+ }
+
+ v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
+ msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ (v_qvlist_info->num_vectors - 1));
+
+ adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ err = -EBUSY;
+ for (i = 0; i < 5; i++) {
+ msleep(100);
+ if (!(adapter->client_pending &
+ BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ err = 0;
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ if (!client) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (strlen(client->name) == 0) {
+ pr_info("i40evf: Failed to register client with no name\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (vf_registered_client) {
+ pr_info("i40evf: Client %s has already been registered!\n",
+ client->name);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+ pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+ client->name);
+ pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+ client->version.major, client->version.minor,
+ client->version.build,
+ i40evf_client_interface_version_str);
+ ret = -EIO;
+ goto out;
+ }
+
+ vf_registered_client = client;
+
+ i40evf_client_prepare(client);
+
+ pr_info("i40evf: Registered client %s with return code %d\n",
+ client->name, ret);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+	/* When an unregister request comes through, we must send a close
+	 * to each of the client instances that were opened. The
+	 * client_release function is called to handle this.
+ */
+ i40evf_client_release(client);
+
+ if (vf_registered_client != client) {
+ pr_info("i40evf: Client %s has not been registered\n",
+ client->name);
+ ret = -ENODEV;
+ goto out;
+ }
+ vf_registered_client = NULL;
+ pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
new file mode 100644
index 000000000000..7d283c7506a5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -0,0 +1,166 @@
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR \
+ __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+ u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+ u8 ftype; /* function type, PF or VF */
+ void *vf; /* cast to i40evf_adapter */
+
+ /* All L2 params that could change during the life span of the device
+ * and need to be communicated to the client when they change
+ */
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+ u16 msix_count; /* number of MSI-X vectors */
+ /* The array below is dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+ u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+
+struct i40e_ops {
+ /* setup_qvlist enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u8 *msg, u16 len);
+
+ /* If the PE engine is unresponsive, the RDMA driver can request a reset. */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever the driver is
+ * ready to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Called to close the client instance when the netdev goes away or
+ * an unregister request comes in. If the close happens due to a
+ * reset, the reset flag is set to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when an L2-managed parameter changes, e.g. the MTU */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ /* called when a message is received from the PF */
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40EVF_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+ u8 type;
+#define I40E_CLIENT_IWARP 0
+ struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40E_CLIENT_H_ */
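
A client of this header is expected to fill in an i40e_client with a matching interface version before registering. A minimal sketch, assuming hypothetical callback implementations (my_open() and friends are not part of the driver):

static struct i40e_client_ops my_ops = {
	.open             = my_open,		/* hypothetical callbacks */
	.close            = my_close,
	.l2_param_change  = my_l2_param_change,
	.virtchnl_receive = my_virtchnl_receive,
};

static struct i40e_client my_client = {
	.name    = "my_iwarp",		/* must fit I40EVF_CLIENT_STR_LENGTH */
	.version = {
		.major = I40EVF_CLIENT_VERSION_MAJOR,
		.minor = I40EVF_CLIENT_VERSION_MINOR,
		.build = I40EVF_CLIENT_VERSION_BUILD,
	},
	.type    = I40E_CLIENT_IWARP,
	.ops     = &my_ops,
};

/* i40evf_register_client() rejects a client whose major/minor version
 * does not match the LAN driver's, so build against the same header.
 */
int err = i40evf_register_client(&my_client);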
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 272d600c1ed0..9bb2cc7dd4e4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,52 +63,74 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
+/* For now we have one and only one private flag, and it is only defined
+ * when we have support for the SKIP_CPU_SYNC DMA attribute. Rather than
+ * leave all this code sitting around empty, we strip it unless our one
+ * private flag is actually available.
+ */
+struct i40evf_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u32 flag;
+ bool read_only;
+};
+
+#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
+ I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
+
/**
- * i40evf_get_settings - Get Link Speed and Duplex settings
+ * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @cmd: ethtool command
*
* Reports speed/duplex settings. Because this is a VF, we don't know what
* kind of link we really have, so we fake it.
**/
-static int i40evf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40evf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = 0;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
/* Set speed and duplex */
switch (adapter->link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -125,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
+ else if (sset == ETH_SS_PRIV_FLAGS)
+ return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -190,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
+ } else if (sset == ETH_SS_PRIV_FLAGS) {
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40evf_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The string set count and the string set must stay in sync with the
+ * flags reported here; add a string to the i40evf_gstrings_priv_flags
+ * array for each new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 i, ret_flags = 0;
+
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40evf_priv_flags *priv_flags;
+
+ priv_flags = &i40evf_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & adapter->flags)
+ ret_flags |= BIT(i);
+ }
+
+ return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u64 changed_flags;
+ u32 i;
+
+ changed_flags = adapter->flags;
+
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40evf_priv_flags *priv_flags;
+
+ priv_flags = &i40evf_gstrings_priv_flags[i];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i))
+ adapter->flags |= priv_flags->flag;
+ else
+ adapter->flags &= ~(priv_flags->flag);
+ }
+
+ /* check for flags that changed */
+ changed_flags ^= adapter->flags;
+
+ /* Process any additional changes needed as a result of flag changes. */
+
+ /* issue a reset to force legacy-rx change to take effect */
+ if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
+ if (netif_running(netdev)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
}
+
+ return 0;
}
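
Each private flag string at index i maps to BIT(i) in the u32 the ethtool core exchanges with these handlers, so with a single flag defined, legacy-rx is bit 0. From userspace it is toggled by name; <iface> is a placeholder:

ethtool --show-priv-flags <iface>
ethtool --set-priv-flags <iface> legacy-rx on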
/**
@@ -239,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -643,7 +744,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops i40evf_ethtool_ops = {
- .get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = i40evf_get_ringparam,
@@ -651,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
+ .get_priv_flags = i40evf_get_priv_flags,
+ .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
@@ -663,6 +765,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
.get_rxfh_key_size = i40evf_get_rxfh_key_size,
+ .get_link_ksettings = i40evf_get_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f35dcaac5bb7..ea110a730e16 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -26,6 +26,14 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
+/* All i40evf tracepoints are defined by the include below, which must
+ * be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
+
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
@@ -36,9 +44,9 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -489,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+ if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
return;
for (i = 0; i < q_vectors; i++)
@@ -685,12 +693,39 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
**/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
+ unsigned int rx_buf_len = I40E_RXBUFFER_2048;
struct i40e_hw *hw = &adapter->hw;
int i;
+ /* Legacy Rx will always default to a 2048 buffer size. */
+#if (PAGE_SIZE < 8192)
+ if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
+ struct net_device *netdev = adapter->netdev;
+
+ /* For jumbo frames on systems with 4K pages we have to use
+ * an order 1 page, so we might as well increase the size
+ * of our Rx buffer to make better use of the available space
+ */
+ rx_buf_len = I40E_RXBUFFER_3072;
+
+ /* We use a 1536 buffer size for configurations with
+ * standard Ethernet mtu. On x86 this gives us enough room
+ * for shared info and 192 bytes of padding.
+ */
+ if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (netdev->mtu <= ETH_DATA_LEN))
+ rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ }
+#endif
+
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
+ adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
+ clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
+ else
+ set_ring_build_skb_enabled(&adapter->rx_rings[i]);
}
}
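
On systems with 4K pages the selection above reduces to three cases. A standalone sketch of the same decision, assuming PAGE_SIZE < 8192 (larger pages always keep the 2048-byte buffer) and the I40E_* constants from the Rx code:

static unsigned int rx_buf_len_for(bool legacy_rx, unsigned int mtu)
{
	/* Legacy Rx always uses the 2048-byte buffer. */
	if (legacy_rx)
		return I40E_RXBUFFER_2048;

	/* A standard-MTU frame fits in 1536 bytes with room left over
	 * for shared info and padding.
	 */
	if (!I40E_2K_TOO_SMALL_WITH_PADDING && mtu <= ETH_DATA_LEN)
		return I40E_RXBUFFER_1536 - NET_IP_ALIGN;

	/* Jumbo frames need an order-1 page anyway, so use 3072. */
	return I40E_RXBUFFER_3072;
}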
@@ -1053,11 +1088,13 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
- clear_bit(__I40E_DOWN, &adapter->vsi.state);
+ clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_napi_enable_all(adapter);
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1235,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
}
pairs = adapter->num_active_queues;
- /* It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
+ * us much good if we have more vectors than CPUs. However, we already
+ * limit the total number of queues by the number of CPUs so we do not
+ * need any further limiting here.
*/
- v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
- v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
+ v_budget = min_t(int, pairs + NONQ_VECS,
+ (int)adapter->vf_res->max_vectors);
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
@@ -1472,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
int err;
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
rtnl_unlock();
@@ -1488,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = i40evf_alloc_queues(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues);
return 0;
-err_alloc_queues:
- i40evf_free_q_vectors(adapter);
err_alloc_q_vectors:
i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
+ i40evf_free_queues(adapter);
+err_alloc_queues:
return err;
}
@@ -1685,6 +1722,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
+ schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -1716,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
adapter->link_up = false;
@@ -1773,10 +1811,17 @@ static void i40evf_reset_task(struct work_struct *work)
u32 reg_val;
int i = 0, err;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
-
+ if (CLIENT_ENABLED(adapter)) {
+ adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
+ I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
+ I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+ I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+ cancel_delayed_work_sync(&adapter->client_task);
+ i40evf_notify_client_close(&adapter->vsi, true);
+ }
i40evf_misc_irq_disable(adapter);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
@@ -1819,6 +1864,7 @@ static void i40evf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
i40evf_disable_vf(adapter);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -1861,9 +1907,8 @@ continue_reset:
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- /* Open RDMA Client again */
- adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1980,6 +2025,48 @@ out:
}
/**
+ * i40evf_client_task - worker thread to perform client work
+ * @work: pointer to work_struct containing our data
+ *
+ * This task handles client interactions. Because client calls can be
+ * reentrant, we can't handle them in the watchdog.
+ **/
+static void i40evf_client_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, client_task.work);
+
+ /* If we can't get the client bit, just give up. We'll be rescheduled
+ * later.
+ */
+
+ if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+ return;
+
+ if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+ i40evf_client_subtask(adapter);
+ adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
+ i40evf_notify_client_close(&adapter->vsi, false);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
+ i40evf_notify_client_open(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ }
+out:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+}
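
The __I40EVF_IN_CLIENT_TASK bit acts as a non-blocking mutex between the client task and the reset path. The two acquisition styles used in this patch, side by side:

/* Client task: non-blocking attempt; bail and rely on rescheduling. */
if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
	return;
/* ... client work ... */
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);

/* Reset task: spin until the bit is free. */
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
	usleep_range(500, 1000);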
+
+/**
* i40evf_free_all_tx_resources - Free Tx Resources for All Queues
* @adapter: board private structure
*
@@ -2147,7 +2234,9 @@ static int i40evf_close(struct net_device *netdev)
return 0;
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
i40evf_down(adapter);
adapter->state = __I40EVF_DOWN_PENDING;
@@ -2162,21 +2251,6 @@ static int i40evf_close(struct net_device *netdev)
}
/**
- * i40evf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* i40evf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2188,6 +2262,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
struct i40evf_adapter *adapter = netdev_priv(netdev);
netdev->mtu = new_mtu;
+ if (CLIENT_ENABLED(adapter)) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -2278,7 +2356,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
.ndo_start_xmit = i40evf_xmit_frame,
- .ndo_get_stats = i40evf_get_stats,
.ndo_set_rx_mode = i40evf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40evf_set_mac,
@@ -2328,6 +2405,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
int i;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vfres->num_vsis; i++) {
@@ -2339,46 +2418,52 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
return -ENODEV;
}
- netdev->hw_enc_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA |
- NETIF_F_SOFT_FEATURES |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ /* advertise to the stack only if offloads for encapsulated packets
+ * are supported
+ */
+ if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
- NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |
- NETIF_F_SCTP_CRC |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
0;
- if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
- netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ if (!(vfres->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ netdev->gso_partial_features |=
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= hw_enc_features;
+ }
/* record features VLANs can make use of */
- netdev->vlan_features |= netdev->hw_enc_features |
- NETIF_F_TSO_MANGLEID;
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
/* Write features and hw_features separately to avoid polluting
- * with, or dropping, features that are set when we registgered.
+ * with, or dropping, features that are set when we registered.
*/
- netdev->hw_features |= netdev->hw_enc_features;
+ hw_features = hw_enc_features;
- netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
- netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_features |= hw_features;
- /* disable VLAN features if not supported */
- if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
- netdev->features ^= I40EVF_VLAN_FEATURES;
+ netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2519,9 +2604,6 @@ static void i40evf_init_task(struct work_struct *work)
goto err_alloc;
}
- if (hw->mac.type == I40E_MAC_X722_VF)
- adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;
-
if (i40evf_process_config(adapter))
goto err_alloc;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
@@ -2581,13 +2663,19 @@ static void i40evf_init_task(struct work_struct *work)
adapter->netdev_registered = true;
netif_tx_stop_all_queues(netdev);
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_add_device(adapter);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+ err);
+ }
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
adapter->state = __I40EVF_DOWN;
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
@@ -2745,6 +2833,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
schedule_delayed_work(&adapter->init_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@ -2857,14 +2946,21 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
+ int err;
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
-
+ cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_del_device(adapter);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ err);
+ }
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index bee58af390e1..deb2cb8dac6b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -26,6 +26,7 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
@@ -158,7 +159,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_ENCAP |
+ I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
@@ -233,7 +236,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
- int i, len;
+ int i, len, max_frame = I40E_MAX_RXBUFFER;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -248,6 +251,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
if (!vqci)
return;
+ /* Limit the maximum frame size when jumbo frames are not enabled */
+ if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
+ (adapter->netdev->mtu <= ETH_DATA_LEN))
+ max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -259,17 +267,14 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i].count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
- vqpi->txq.headwb_enabled = 1;
- vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
- (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
-
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
- vqpi->rxq.max_pkt_size = adapter->netdev->mtu
- + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
- vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ vqpi->rxq.max_pkt_size = max_frame;
+ vqpi->rxq.databuffer_size =
+ ALIGN(adapter->rx_rings[i].rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
vqpi++;
}
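
The ALIGN rounds the buffer length up to the granularity the Rx queue context accepts. Assuming I40E_RXQ_CTX_DBUFF_SHIFT is 7 (128-byte units, as in the PF driver) and NET_IP_ALIGN is 2:

/* ALIGN(len, BIT_ULL(7)) rounds up to the next 128-byte multiple:
 *   ALIGN(1536 - NET_IP_ALIGN, 128) == ALIGN(1534, 128) == 1536
 *   ALIGN(3072, 128)                == 3072 (already aligned)
 */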
@@ -955,17 +960,17 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
- adapter->net_stats.rx_packets = stats->rx_unicast +
- stats->rx_multicast +
- stats->rx_broadcast;
- adapter->net_stats.tx_packets = stats->tx_unicast +
- stats->tx_multicast +
- stats->tx_broadcast;
- adapter->net_stats.rx_bytes = stats->rx_bytes;
- adapter->net_stats.tx_bytes = stats->tx_bytes;
- adapter->net_stats.tx_errors = stats->tx_errors;
- adapter->net_stats.rx_dropped = stats->rx_discards;
- adapter->net_stats.tx_dropped = stats->tx_discards;
+ netdev->stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ netdev->stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.rx_dropped = stats->rx_discards;
+ netdev->stats.tx_dropped = stats->tx_discards;
adapter->current_stats = *stats;
}
break;
@@ -999,6 +1004,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ /* Gobble zero-length replies from the PF. They indicate that
+ * a previous message was received OK, and the client doesn't
+ * care about that.
+ */
+ if (msglen && CLIENT_ENABLED(adapter))
+ i40evf_notify_client_message(&adapter->vsi,
+ msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
adapter->client_pending &=
~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
@@ -1014,7 +1029,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
- if (v_opcode != adapter->current_op)
+ if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
adapter->current_op, v_opcode);
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 8aee314332a8..d8517779439b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -39,6 +39,27 @@
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+/* Wake Up Status */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact */
+#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */
+#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
+
+/* Packet types that are enabled for wake packet delivery */
+#define WAKE_PKT_WUS ( \
+ E1000_WUS_EX | \
+ E1000_WUS_ARPD | \
+ E1000_WUS_IPV4 | \
+ E1000_WUS_IPV6 | \
+ E1000_WUS_NSD)
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_MASK 0x00000FFF
+
+/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
+#define E1000_WUPM_BYTES 128
+
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index d20af6b2f581..3e7fed73df15 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -55,6 +55,10 @@
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+/* VF requests to clear all unicast MAC filters */
+#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
+/* VF requests to add unicast MAC filter */
+#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index acbc3abe2ddd..bf9bf9056d0c 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -111,6 +111,16 @@ struct vf_data_storage {
bool spoofchk_enabled;
};
+/* Number of unicast MAC filters reserved for the PF in the RAR registers */
+#define IGB_PF_MAC_FILTERS_RESERVED 3
+
+struct vf_mac_filter {
+ struct list_head l;
+ int vf;
+ bool free;
+ u8 vf_mac[ETH_ALEN];
+};
+
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
@@ -142,12 +152,24 @@ struct vf_data_storage {
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_2048 2048
+#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#define IGB_TS_HDR_LEN 16
+
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IGB_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#else
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#endif
/* How many Rx buffers do we bundle into one write to the hardware? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IGB_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
#define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400
@@ -301,12 +323,51 @@ struct igb_q_vector {
};
enum e1000_ring_flags_t {
+ IGB_RING_FLAG_RX_3K_BUFFER,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_large_buffer(ring) \
+ test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+ set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+ clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IGB_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+#endif
+ return IGB_RXBUFFER_2048;
+}
+
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
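
On a 4K-page system the large-buffer path bumps the page order to 1, doubling the per-buffer page allocation. Concretely, assuming PAGE_SIZE == 4096 and large buffers enabled:

/* igb_rx_pg_order(ring) == 1
 * igb_rx_pg_size(ring)  == 4096 << 1 == 8192
 * so each half of the page holds one 3K buffer plus skb overhead.
 */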
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
@@ -398,6 +459,15 @@ struct igb_nfc_filter {
u16 action;
};
+struct igb_mac_addr {
+ u8 addr[ETH_ALEN];
+ u8 queue;
+ u8 state; /* bitmask */
+};
+
+#define IGB_MAC_STATE_DEFAULT 0x1
+#define IGB_MAC_STATE_IN_USE 0x2
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -524,6 +594,10 @@ struct igb_adapter {
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
bool etype_bitmap[MAX_ETYPE_FILTER];
+
+ struct igb_mac_addr *mac_table;
+ struct vf_mac_filter vf_macs;
+ struct vf_mac_filter *vf_mac_list;
};
/* flags controlling PTP/1588 function */
@@ -545,6 +619,7 @@ struct igb_adapter {
#define IGB_FLAG_HAS_MSIX BIT(13)
#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
+#define IGB_FLAG_RX_LEGACY BIT(16)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
@@ -558,7 +633,6 @@ struct igb_adapter {
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -591,7 +665,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -604,7 +677,7 @@ void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 737b664d004c..0efb62db6efd 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -144,7 +144,15 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
-static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings)
+
+static int igb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -152,76 +160,73 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
u32 speed;
+ u32 supported, advertising;
status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP |
- SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
+ advertising |= hw->phy.autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
} else {
- ecmd->supported = (SUPPORTED_FIBRE |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause);
- ecmd->advertising = (ADVERTISED_FIBRE |
- ADVERTISED_1000baseKX_Full);
+ supported = (SUPPORTED_FIBRE |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ advertising = (ADVERTISED_FIBRE |
+ ADVERTISED_1000baseKX_Full);
if (hw->mac.type == e1000_i354) {
if ((hw->device_id ==
E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
!(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->supported |= SUPPORTED_2500baseX_Full;
- ecmd->supported &=
- ~SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_2500baseX_Full;
- ecmd->advertising &=
- ~ADVERTISED_1000baseKX_Full;
+ supported |= SUPPORTED_2500baseX_Full;
+ supported &= ~SUPPORTED_1000baseKX_Full;
+ advertising |= ADVERTISED_2500baseX_Full;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
if (eth_flags->e100_base_fx) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
}
if (hw->mac.autoneg == 1)
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
if (hw->mac.autoneg != 1)
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
switch (hw->fc.requested_mode) {
case e1000_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case e1000_fc_rx_pause:
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
break;
case e1000_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
}
if (status & E1000_STATUS_LU) {
if ((status & E1000_STATUS_2P5_SKU) &&
@@ -236,39 +241,46 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_cmd_speed_set(ecmd, speed);
+ cmd->base.speed = speed;
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == e1000_media_type_copper)
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int igb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 advertising;
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
@@ -283,12 +295,12 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->phy.media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
@@ -297,10 +309,13 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber) {
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
switch (adapter->link_speed) {
@@ -320,31 +335,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
break;
}
} else {
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
}
- ecmd->advertising = hw->phy.autoneg_advertised;
+ advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->phy.mdix = AUTO_ALL_MODES;
else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -852,6 +867,8 @@ static void igb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
}
static void igb_get_ringparam(struct net_device *netdev,
@@ -1811,14 +1828,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+ while (rx_desc->wb.upper.length) {
/* check Rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* verify contents of skb */
@@ -1828,12 +1845,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer_info->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer_info, dma),
+ dma_unmap_len(tx_buffer_info, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer_info, len, 0);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -2271,6 +2297,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
return IGB_STATS_LEN;
case ETH_SS_TEST:
return IGB_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGB_PRIV_FLAGS_STR_LEN;
default:
return -ENOTSUPP;
}
@@ -2376,6 +2404,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igb_priv_flags_strings,
+ IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -3388,9 +3420,38 @@ static int igb_set_channels(struct net_device *netdev,
return 0;
}
+static u32 igb_get_priv_flags(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IGB_FLAG_RX_LEGACY)
+ priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IGB_FLAG_RX_LEGACY;
+ if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX)
+ flags |= IGB_FLAG_RX_LEGACY;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
- .get_settings = igb_get_settings,
- .set_settings = igb_set_settings,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
@@ -3426,8 +3487,12 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
+ .get_priv_flags = igb_get_priv_flags,
+ .set_priv_flags = igb_set_priv_flags,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+ .get_link_ksettings = igb_get_link_ksettings,
+ .set_link_ksettings = igb_set_link_ksettings,
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index be456bae8169..1cf74aa4ebd9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -161,11 +161,16 @@ static void igb_vlan_mode(struct net_device *netdev,
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
-static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
+static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
+static void igb_flush_mac_table(struct igb_adapter *);
+static int igb_available_rars(struct igb_adapter *, u8);
+static void igb_set_default_mac_filter(struct igb_adapter *);
+static int igb_uc_sync(struct net_device *, const unsigned char *);
+static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
@@ -554,7 +559,7 @@ rx_ring_summary:
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- IGB_RX_BUFSZ, true);
+ igb_rx_bufsz(rx_ring), true);
}
}
}
@@ -1153,6 +1158,8 @@ msi_only:
pci_disable_sriov(adapter->pdev);
msleep(500);
+ kfree(adapter->vf_mac_list);
+ adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
@@ -1987,6 +1994,13 @@ void igb_reset(struct igb_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
+ /* RAR registers were cleared during init_hw, clear mac table */
+ igb_flush_mac_table(adapter);
+ __dev_uc_unsync(adapter->netdev, NULL);
+
+ /* Recover default RAR entry */
+ igb_set_default_mac_filter(adapter);
+
/* Flow control settings reset on hardware reset, so guarantee flow
* control is off when forcing speed.
*/
@@ -2095,11 +2109,9 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
struct igb_adapter *adapter = netdev_priv(dev);
- struct e1000_hw *hw = &adapter->hw;
int vfn = adapter->vfs_allocated_count;
- int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
- if (netdev_uc_count(dev) >= rar_entries)
+ if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
return -ENOMEM;
}
@@ -2517,6 +2529,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
+ igb_set_default_mac_filter(adapter);
+
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
@@ -2761,6 +2775,7 @@ err_eeprom:
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
+ kfree(adapter->mac_table);
kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
@@ -2796,6 +2811,8 @@ static int igb_disable_sriov(struct pci_dev *pdev)
msleep(500);
}
+ kfree(adapter->vf_mac_list);
+ adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
@@ -2816,8 +2833,9 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
int old_vfs = pci_num_vf(pdev);
+ struct vf_mac_filter *mac_list;
int err = 0;
- int i;
+ int num_vf_mac_filters, i;
if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
@@ -2845,6 +2863,38 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
}
+ /* Due to the limited number of RAR entries, calculate the potential
+ * number of MAC filters available for the VFs. Reserve entries for
+ * the PF default MAC, the PF MAC filters, and at least one RAR entry
+ * per VF for its MAC address.
+ */
+ num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
+ (1 + IGB_PF_MAC_FILTERS_RESERVED +
+ adapter->vfs_allocated_count);
+
+ adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
+ sizeof(struct vf_mac_filter),
+ GFP_KERNEL);
+
+ mac_list = adapter->vf_mac_list;
+ INIT_LIST_HEAD(&adapter->vf_macs.l);
+
+ if (adapter->vf_mac_list) {
+ /* Initialize list of VF MAC filters */
+ for (i = 0; i < num_vf_mac_filters; i++) {
+ mac_list->vf = -1;
+ mac_list->free = true;
+ list_add(&mac_list->l, &adapter->vf_macs.l);
+ mac_list++;
+ }
+ } else {
+ /* If we could not allocate memory for the VF MAC filters
+ * we can continue without this feature but warn the user.
+ */
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for VF MAC filter list\n");
+ }
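
As a worked example of the arithmetic above, assuming a hypothetical 24-entry RAR table and 7 allocated VFs:

/* num_vf_mac_filters = 24 - (1 + IGB_PF_MAC_FILTERS_RESERVED + 7)
 *                    = 24 - (1 + 3 + 7)
 *                    = 13 entries placed on the free list
 */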
+
/* only call pci_enable_sriov() if no VFs are allocated already */
if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
@@ -2861,6 +2911,8 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
err_out:
+ kfree(adapter->vf_mac_list);
+ adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
@@ -2937,6 +2989,7 @@ static void igb_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_mem_regions(pdev);
+ kfree(adapter->mac_table);
kfree(adapter->shadow_vfta);
free_netdev(netdev);
@@ -3099,6 +3152,11 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGB_FLAG_HAS_MSIX;
+ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
+ hw->mac.rar_entry_count, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
igb_probe_vfs(adapter);
igb_init_queue_configuration(adapter);
@@ -3293,7 +3351,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -3404,6 +3462,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
txdctl |= IGB_TX_HTHRESH << 8;
txdctl |= IGB_TX_WTHRESH << 16;
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct igb_tx_buffer) * ring->count);
+
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl);
}
@@ -3435,7 +3497,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -3720,6 +3782,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
+ union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
@@ -3741,7 +3804,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -3758,11 +3824,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct igb_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IGB_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* enable receive descriptor fetching */
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
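Writing a zero length into descriptor 0 matters: the Rx cleanup path now treats a non-zero written-back length as the "descriptor done" test instead of the status word, so every descriptor handed back to hardware must start with the length cleared. A small userspace model of that sentinel convention, assuming nothing beyond the length field:

#include <stdint.h>
#include <stdio.h>

struct desc { uint16_t length; };

int main(void)
{
	struct desc ring[4] = { { 0 } }; /* length cleared, like desc 0 above */
	int i;

	ring[1].length = 64; /* "hardware" writes back a received frame */

	for (i = 0; i < 4; i++) {
		if (!ring[i].length) /* the same test igb_clean_rx_irq uses */
			continue;
		printf("desc %d holds %u bytes\n", i, (unsigned)ring[i].length);
		ring[i].length = 0; /* re-arm, as igb_alloc_rx_buffers does */
	}
	return 0;
}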
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+ /* set build_skb and buffer size flags */
+ clear_ring_build_skb_enabled(rx_ring);
+ clear_ring_uses_large_buffer(rx_ring);
+
+ if (adapter->flags & IGB_FLAG_RX_LEGACY)
+ return;
+
+ set_ring_build_skb_enabled(rx_ring);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ return;
+
+ set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3774,14 +3868,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
int i;
/* set the correct pool for the PF default MAC address in entry 0 */
- igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
- adapter->vfs_allocated_count);
+ igb_set_default_mac_filter(adapter);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -3822,55 +3919,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
igb_free_tx_resources(adapter->tx_ring[i]);
}
-void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
- struct igb_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* buffer_info must be completely set up in the transmit path */
-}
-
/**
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
- struct igb_tx_buffer *buffer_info;
- unsigned long size;
- u16 i;
+ u16 i = tx_ring->next_to_clean;
+ struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (!tx_ring->tx_buffer_info)
- return;
- /* Free all the Tx ring sk_buffs */
+ while (i != tx_ring->next_to_use) {
+ union e1000_adv_tx_desc *eop_desc, *tx_desc;
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
- }
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
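igb_clean_tx_ring() now walks only the span between next_to_clean and next_to_use rather than memsetting the whole buffer_info array, wrapping the index at the ring count. A runnable sketch of that bounded walk, with made-up indices:

#include <stdio.h>

int main(void)
{
	unsigned int count = 8;        /* ring size */
	unsigned int ntc = 6, ntu = 2; /* example next_to_clean/next_to_use */
	unsigned int i = ntc;

	while (i != ntu) {
		printf("cleaning slot %u\n", i); /* unmap/free happens here */
		if (++i == count) /* wrap exactly as the driver does */
			i = 0;
	}
	return 0;
}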
@@ -3932,50 +4037,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
- unsigned long size;
- u16 i;
+ u16 i = rx_ring->next_to_clean;
if (rx_ring->skb)
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
- if (!rx_ring->rx_buffer_info)
- return;
-
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
+ while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (!buffer_info->page)
- continue;
-
/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
dma_sync_single_range_for_cpu(rx_ring->dev,
buffer_info->dma,
buffer_info->page_offset,
- IGB_RX_BUFSZ,
+ igb_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev,
buffer_info->dma,
- PAGE_SIZE,
+ igb_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ IGB_RX_DMA_ATTR);
__page_frag_cache_drain(buffer_info->page,
buffer_info->pagecnt_bias);
- buffer_info->page = NULL;
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
}
- size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -4014,8 +4108,7 @@ static int igb_set_mac(struct net_device *netdev, void *p)
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
- igb_rar_set_qsel(adapter, hw->mac.addr, 0,
- adapter->vfs_allocated_count);
+ igb_set_default_mac_filter(adapter);
return 0;
}
@@ -4059,49 +4152,6 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
-/**
- * igb_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int igb_write_uc_addr_list(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- unsigned int vfn = adapter->vfs_allocated_count;
- unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev) && rar_entries) {
- struct netdev_hw_addr *ha;
-
- netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- igb_rar_set_qsel(adapter, ha->addr,
- rar_entries--,
- vfn);
- count++;
- }
- }
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--) {
- wr32(E1000_RAH(rar_entries), 0);
- wr32(E1000_RAL(rar_entries), 0);
- }
- wrfl();
-
- return count;
-}
-
static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4240,7 +4290,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count;
- u32 rctl = 0, vmolr = 0;
+ u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4274,8 +4324,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = igb_write_uc_addr_list(netdev);
- if (count < 0) {
+ if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
rctl |= E1000_RCTL_UPE;
vmolr |= E1000_VMOLR_ROPE;
}
@@ -4298,6 +4347,14 @@ static void igb_set_rx_mode(struct net_device *netdev)
E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl);
+#if (PAGE_SIZE < 8192)
+ if (!adapter->vfs_allocated_count) {
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ rlpml = IGB_MAX_FRAME_BUILD_SKB;
+ }
+#endif
+ wr32(E1000_RLPML, rlpml);
+
/* In order to support SR-IOV and eventually VMDq it is necessary to set
* the VMOLR to enable the appropriate modes. Without this workaround
* we will have issues with VLAN tag stripping not being done for frames
@@ -4312,12 +4369,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
- /* enable Rx jumbo frames, no need for restriction */
+ /* enable Rx jumbo frames, restrict as needed to support build_skb */
vmolr &= ~E1000_VMOLR_RLPML_MASK;
- vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ vmolr |= IGB_MAX_FRAME_BUILD_SKB;
+ else
+#endif
+ vmolr |= MAX_JUMBO_FRAME_SIZE;
+ vmolr |= E1000_VMOLR_LPE;
wr32(E1000_VMOLR(vfn), vmolr);
- wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
igb_restore_vf_multicasts(adapter);
}
@@ -5256,18 +5318,32 @@ static void igb_tx_map(struct igb_ring *tx_ring,
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
tx_ring->next_to_use = i;
}
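The reworked unwind steps backwards from the failing slot to `first`, and `i-- == 0` catches the wrap before indexing: decrementing past slot 0 must land on count - 1, since i is unsigned. A standalone model of the idiom (slot numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned int count = 8, first = 5, i = 1; /* map failed at slot 1 */

	while (i != first) {
		printf("unmapping slot %u\n", i); /* dma_unmap_page() here */
		if (i-- == 0)
			i += count; /* wrap back to count - 1 past slot 0 */
	}
	printf("unmapping head slot %u and freeing its skb\n", i);
	return 0;
}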
@@ -5339,7 +5415,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- igb_unmap_and_free_tx_resource(tx_ring, first);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -6304,7 +6381,6 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- int rar_entry = hw->mac.rar_entry_count - (vf + 1);
u32 reg, msgbuf[3];
u8 *addr = (u8 *)(&msgbuf[1]);
@@ -6312,7 +6388,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
igb_vf_reset(adapter, vf);
/* set vf mac address */
- igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
+ igb_set_vf_mac(adapter, vf, vf_mac);
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
@@ -6332,18 +6408,238 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
igb_write_mbx(hw, msgbuf, 3, vf);
}
+static void igb_flush_mac_table(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ igb_rar_set_index(adapter, i);
+ }
+}
+
+static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ /* do not count RAR entries reserved for VF MAC addresses */
+ int rar_entries = hw->mac.rar_entry_count -
+ adapter->vfs_allocated_count;
+ int i, count = 0;
+
+ for (i = 0; i < rar_entries; i++) {
+ /* do not count default entries */
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
+ continue;
+
+ /* do not count "in use" entries for different queues */
+ if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
+ (adapter->mac_table[i].queue != queue))
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
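igb_available_rars() counts an entry as available unless it is the default entry or in use on behalf of a different queue. The following self-contained C model reproduces that accounting, with illustrative flag values rather than the driver's:

#include <stdio.h>

#define STATE_DEFAULT 0x1 /* stand-in for IGB_MAC_STATE_DEFAULT */
#define STATE_IN_USE  0x2 /* stand-in for IGB_MAC_STATE_IN_USE */

struct mac_entry { int state; int queue; };

static int available(const struct mac_entry *tbl, int n, int queue)
{
	int i, count = 0;

	for (i = 0; i < n; i++) {
		if (tbl[i].state & STATE_DEFAULT)
			continue; /* never hand out the default entry */
		if ((tbl[i].state & STATE_IN_USE) && tbl[i].queue != queue)
			continue; /* busy on behalf of another queue */
		count++;
	}
	return count;
}

int main(void)
{
	struct mac_entry tbl[3] = {
		{ STATE_DEFAULT | STATE_IN_USE, 0 },
		{ STATE_IN_USE, 1 },
		{ 0, 0 },
	};

	printf("%d\n", available(tbl, 3, 0)); /* prints 1 */
	return 0;
}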
+/* Set default MAC address for the PF in the first RAR entry */
+static void igb_set_default_mac_filter(struct igb_adapter *adapter)
+{
+ struct igb_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->queue = adapter->vfs_allocated_count;
+ mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
+
+ igb_rar_set_index(adapter, 0);
+}
+
+int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count -
+ adapter->vfs_allocated_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;
+
+ igb_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count -
+ adapter->vfs_allocated_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+
+ igb_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
+
+ return min_t(int, ret, 0);
+}
+
+static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
+
+ return 0;
+}
+
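__dev_uc_sync() expects its sync hook to return 0 or a negative errno, while igb_add_mac_filter() returns the RAR index it consumed; min_t(int, ret, 0) collapses any non-negative index to success. A trivial sketch of the clamp:

#include <stdio.h>

/* same effect as min_t(int, ret, 0) in igb_uc_sync() above */
static int clamp_ret(int ret)
{
	return ret < 0 ? ret : 0;
}

int main(void)
{
	printf("%d %d\n", clamp_ret(3), clamp_ret(-28)); /* prints: 0 -28 */
	return 0;
}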
+int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
+ const u32 info, const u8 *addr)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ struct list_head *pos;
+ struct vf_mac_filter *entry = NULL;
+ int ret = 0;
+
+ switch (info) {
+ case E1000_VF_MAC_FILTER_CLR:
+ /* remove all unicast MAC filters related to the current VF */
+ list_for_each(pos, &adapter->vf_macs.l) {
+ entry = list_entry(pos, struct vf_mac_filter, l);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ igb_del_mac_filter(adapter, entry->vf_mac, vf);
+ }
+ }
+ break;
+ case E1000_VF_MAC_FILTER_ADD:
+ if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
+ dev_warn(&pdev->dev,
+ "VF %d requested MAC filter but is administratively denied\n",
+ vf);
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC filter\n",
+ vf);
+ return -EINVAL;
+ }
+
+ /* try to find empty slot in the list */
+ list_for_each(pos, &adapter->vf_macs.l) {
+ entry = list_entry(pos, struct vf_mac_filter, l);
+ if (entry->free)
+ break;
+ }
+
+ if (entry && entry->free) {
+ entry->free = false;
+ entry->vf = vf;
+ ether_addr_copy(entry->vf_mac, addr);
+
+ ret = igb_add_mac_filter(adapter, addr, vf);
+ ret = min_t(int, ret, 0);
+ } else {
+ ret = -ENOSPC;
+ }
+
+ if (ret == -ENOSPC)
+ dev_warn(&pdev->dev,
+ "VF %d has requested MAC filter but there is no space for it\n",
+ vf);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
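The dispatch above assumes the low bits of msg[0] carry the command (E1000_VF_SET_MAC_ADDR) while the MSGINFO field selects the sub-operation (ADD vs CLR). A sketch of that packing; the shift and mask values here are assumptions for illustration, not quotes of the e1000 headers:

#include <stdio.h>
#include <stdint.h>

#define MSGINFO_SHIFT 16                     /* assumed E1000_VT_MSGINFO_SHIFT */
#define MSGINFO_MASK  (0xFFu << MSGINFO_SHIFT)
#define CMD_SET_MAC_ADDR 0x02u               /* E1000_VF_SET_MAC_ADDR */
#define FILTER_ADD (0x02u << MSGINFO_SHIFT)  /* E1000_VF_MAC_FILTER_ADD */

int main(void)
{
	uint32_t msg0 = CMD_SET_MAC_ADDR | FILTER_ADD;

	/* same split the PF performs: command low, info field masked out */
	printf("cmd=%#x info=%#x\n",
	       msg0 & 0xFFFFu, msg0 & MSGINFO_MASK);
	return 0;
}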
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
+ struct pci_dev *pdev = adapter->pdev;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
+
/* The VF MAC Address is stored in a packed array of bytes
* starting at the second 32 bit word of the msg array
*/
- unsigned char *addr = (char *)&msg[1];
- int err = -1;
+ unsigned char *addr = (unsigned char *)&msg[1];
+ int ret = 0;
- if (is_valid_ether_addr(addr))
- err = igb_set_vf_mac(adapter, vf, addr);
+ if (!info) {
+ if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
+ vf);
+ return -EINVAL;
+ }
- return err;
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC\n",
+ vf);
+ return -EINVAL;
+ }
+
+ ret = igb_set_vf_mac(adapter, vf, addr);
+ } else {
+ ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
+ }
+
+ return ret;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
@@ -6400,13 +6696,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case E1000_VF_SET_MAC_ADDR:
- retval = -EINVAL;
- if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
- retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
- else
- dev_warn(&pdev->dev,
- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
- vf);
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
break;
case E1000_VF_SET_PROMISC:
retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -6686,7 +6976,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* clear last DMA location and unmap remaining buffers */
@@ -6822,8 +7111,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool igb_page_is_reserved(struct page *page)
@@ -6831,11 +7126,10 @@ static inline bool igb_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
- struct page *page,
- unsigned int truesize)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
@@ -6843,16 +7137,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_ref_count(page) != pagecnt_bias))
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+#define IGB_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ if (rx_buffer->page_offset > IGB_LAST_OFFSET)
return false;
#endif
@@ -6860,7 +7151,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(pagecnt_bias == 1)) {
+ if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
@@ -6872,34 +7163,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
**/
-static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+static void igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- unsigned int size,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+#endif
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
- unsigned int pull_len;
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
+ if (unlikely(!skb))
+ return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
@@ -6907,95 +7220,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
size -= IGB_TS_HDR_LEN;
}
- if (likely(size <= IGB_RX_HDR_LEN)) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!igb_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- return false;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IGB_RX_HDR_LEN)
+ headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
- va += pull_len;
- size -= pull_len;
-
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (va + headlen) - page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
- return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+ return skb;
}
-static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ unsigned int size)
{
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
- struct igb_rx_buffer *rx_buffer;
- struct page *page;
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IGB_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IGB_SKB_PAD);
+ __skb_put(skb, size);
- /* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* We are not reusing the buffer so unmap it and free
- * any references we are holding to it
- */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
}
- /* clear contents of rx_buffer */
- rx_buffer->page = NULL;
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
return skb;
}
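For the PAGE_SIZE >= 8192 case the truesize charged to the socket is the aligned headroom-plus-frame plus the aligned skb_shared_info tail. A quick worked computation, using stand-in values for SKB_DATA_ALIGN() and the shared-info size:

#include <stdio.h>

#define CACHELINE 64
#define ALIGN_UP(x) (((x) + CACHELINE - 1) & ~(CACHELINE - 1))

int main(void)
{
	unsigned int shinfo = ALIGN_UP(320); /* stand-in shared-info size */
	unsigned int pad = 64;               /* stand-in for IGB_SKB_PAD */
	unsigned int size = 1500;            /* received frame length */

	/* mirrors: SKB_DATA_ALIGN(shinfo) + SKB_DATA_ALIGN(pad + size) */
	printf("truesize = %u\n", shinfo + ALIGN_UP(pad + size));
	return 0;
}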
@@ -7154,6 +7445,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+ const unsigned int size)
+{
+ struct igb_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer)
+{
+ if (igb_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ IGB_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
@@ -7163,6 +7495,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *rx_buffer;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -7171,8 +7505,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
}
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.status_error)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -7181,13 +7515,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
*/
dma_rmb();
+ rx_buffer = igb_get_rx_buffer(rx_ring, size);
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (skb)
+ igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ else
+ skb = igb_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ igb_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -7231,6 +7577,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -7242,21 +7593,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igb_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGB_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
+ __free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -7264,7 +7617,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = igb_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
return true;
@@ -7279,6 +7632,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ u16 bufsz;
/* nothing to do */
if (!cleaned_count)
@@ -7288,14 +7642,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
+ bufsz = igb_rx_bufsz(rx_ring);
+
do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- IGB_RX_BUFSZ,
+ bi->page_offset, bufsz,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -7312,8 +7667,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -7630,6 +7985,36 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
return 0;
}
+static void igb_deliver_wake_packet(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sk_buff *skb;
+ u32 wupl;
+
+ wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
+
+ /* WUPM stores only the first 128 bytes of the wake packet.
+ * Read the packet only if we have the whole thing.
+ */
+ if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
+ return;
+
+ skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
+ if (!skb)
+ return;
+
+ skb_put(skb, wupl);
+
+ /* Ensure reads are 32-bit aligned */
+ wupl = roundup(wupl, 4);
+
+ memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+}
+
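The helper trusts the WUPL length only when the whole packet fits in the 128-byte WUPM window, and rounds the MMIO copy length up to 32-bit units. A userspace approximation of that flow:

#include <stdio.h>
#include <string.h>

#define WUPM_BYTES 128 /* size of the wake-up packet memory window */

int main(void)
{
	unsigned char wupm[WUPM_BYTES] = "wake"; /* device-side buffer */
	unsigned char pkt[WUPM_BYTES];
	unsigned int wupl = 5; /* stored length, as read from WUPL */

	if (wupl == 0 || wupl > WUPM_BYTES)
		return 0; /* partial or empty packet: do not deliver */

	memcpy(pkt, wupm, (wupl + 3) & ~3u); /* roundup(wupl, 4) */
	printf("delivered %u-byte wake packet\n", wupl);
	return 0;
}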
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
@@ -7659,7 +8044,7 @@ static int igb_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 err;
+ u32 err, val;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -7690,6 +8075,10 @@ static int igb_resume(struct device *dev)
*/
igb_get_hw_control(adapter);
+ val = rd32(E1000_WUS);
+ if (val & WAKE_PKT_WUS)
+ igb_deliver_wake_packet(netdev);
+
wr32(E1000_WUS, ~0);
rtnl_lock();
@@ -7948,11 +8337,16 @@ static void igb_io_resume(struct pci_dev *pdev)
igb_get_hw_control(adapter);
}
-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
- u8 qsel)
+/**
+ * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: Pointer to adapter structure
+ * @index: Index of the RAR entry which need to be synced with MAC table
+ **/
+static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
+ u8 *addr = adapter->mac_table[index].addr;
/* HW expects these to be in network order when they are plugged
* into the registers which are little endian. In order to guarantee
@@ -7963,12 +8357,16 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
rar_high = le16_to_cpup((__le16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
- rar_high |= E1000_RAH_AV;
+ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
+ rar_high |= E1000_RAH_AV;
- if (hw->mac.type == e1000_82575)
- rar_high |= E1000_RAH_POOL_1 * qsel;
- else
- rar_high |= E1000_RAH_POOL_1 << qsel;
+ if (hw->mac.type == e1000_82575)
+ rar_high |= E1000_RAH_POOL_1 *
+ adapter->mac_table[index].queue;
+ else
+ rar_high |= E1000_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
wr32(E1000_RAL(index), rar_low);
wrfl();
@@ -7984,10 +8382,13 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
* towards the first, as a result a collision should not be possible
*/
int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
- memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-
- igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
+ ether_addr_copy(vf_mac_addr, mac_addr);
+ ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
+ adapter->mac_table[rar_entry].queue = vf;
+ adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
+ igb_rar_set_index(adapter, rar_entry);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c4477552ce9e..7a3fd4d74592 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
* incoming frame. The value is stored in little endian format starting on
* byte 8.
**/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8dea1b1367ef..34faa113a8a0 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -71,45 +71,45 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
-static int igbvf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int igbvf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 status;
- ecmd->supported = SUPPORTED_1000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ecmd->advertising = ADVERTISED_1000baseT_Full;
-
- ecmd->port = -1;
- ecmd->transceiver = XCVR_DUMMY1;
+ cmd->base.port = -1;
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
-static int igbvf_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int igbvf_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
return -EOPNOTSUPP;
}
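The ksettings conversion keeps the same STATUS-register decode: link-up selects among 1000/100/10, and the FD bit picks the duplex. A standalone model of the decode; the bit positions here are illustrative, not the real E1000_STATUS_* values:

#include <stdio.h>

#define ST_LU   0x1 /* link up */
#define ST_1000 0x2
#define ST_100  0x4
#define ST_FD   0x8

int main(void)
{
	unsigned int status = ST_LU | ST_1000 | ST_FD;
	int speed = -1;
	const char *duplex = "unknown";

	if (status & ST_LU) {
		speed = (status & ST_1000) ? 1000 :
			(status & ST_100) ? 100 : 10;
		duplex = (status & ST_FD) ? "full" : "half";
	}
	printf("speed=%d duplex=%s\n", speed, duplex);
	return 0;
}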
@@ -443,8 +443,6 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
- .get_settings = igbvf_get_settings,
- .set_settings = igbvf_set_settings,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
@@ -467,6 +465,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.get_ethtool_stats = igbvf_get_ethtool_stats,
.get_coalesce = igbvf_get_coalesce,
.set_coalesce = igbvf_set_coalesce,
+ .get_link_ksettings = igbvf_get_link_ksettings,
+ .set_link_ksettings = igbvf_set_link_ksettings,
};
void igbvf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 6f4290d6dc9f..bf69f01f8467 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -101,6 +101,8 @@ enum latency_range {
#define IGBVF_MNG_VLAN_NONE (-1)
+#define IGBVF_MAX_MAC_FILTERS 3
+
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
@@ -241,7 +243,6 @@ struct igbvf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index f800bf8eedae..30d58c4a444e 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -62,6 +62,10 @@
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+/* VF requests PF to clear all unicast MAC filters */
+#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
+/* VF requests PF to add unicast MAC filter */
+#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 839ba110f7fb..1b9cbbe88f6f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -400,8 +400,8 @@ next_desc:
adapter->total_rx_packets += total_packets;
adapter->total_rx_bytes += total_bytes;
- adapter->net_stats.rx_bytes += total_bytes;
- adapter->net_stats.rx_packets += total_packets;
+ netdev->stats.rx_bytes += total_bytes;
+ netdev->stats.rx_packets += total_packets;
return cleaned;
}
@@ -864,8 +864,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
}
}
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
+ netdev->stats.tx_bytes += total_bytes;
+ netdev->stats.tx_packets += total_packets;
return count < tx_ring->count;
}
@@ -1433,12 +1433,52 @@ static void igbvf_set_multi(struct net_device *netdev)
}
/**
+ * igbvf_set_uni - Configure unicast MAC filters
+ * @netdev: network interface device structure
+ *
+ * This routine is responsible for configuring the hardware for proper
+ * unicast filters.
+ **/
+static int igbvf_set_uni(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) {
+ pr_err("Too many unicast filters - No Space\n");
+ return -ENOSPC;
+ }
+
+ /* Clear all unicast MAC filters */
+ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL);
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ /* Add MAC filters one by one */
+ netdev_for_each_uc_addr(ha, netdev) {
+ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD,
+ ha->addr);
+ udelay(200);
+ }
+ }
+
+ return 0;
+}
+
+static void igbvf_set_rx_mode(struct net_device *netdev)
+{
+ igbvf_set_multi(netdev);
+ igbvf_set_uni(netdev);
+}
+
+/**
* igbvf_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
- igbvf_set_multi(adapter->netdev);
+ igbvf_set_rx_mode(adapter->netdev);
igbvf_restore_vlan(adapter);
@@ -1798,7 +1838,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
/* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.mprc;
+ adapter->netdev->stats.multicast = adapter->stats.mprc;
}
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
@@ -2334,21 +2374,6 @@ static void igbvf_reset_task(struct work_struct *work)
}
/**
- * igbvf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* igbvf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2635,8 +2660,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_open = igbvf_open,
.ndo_stop = igbvf_close,
.ndo_start_xmit = igbvf_xmit_frame,
- .ndo_get_stats = igbvf_get_stats,
- .ndo_set_rx_mode = igbvf_set_multi,
+ .ndo_set_rx_mode = igbvf_set_rx_mode,
.ndo_set_mac_address = igbvf_set_mac,
.ndo_change_mtu = igbvf_change_mtu,
.ndo_do_ioctl = igbvf_ioctl,
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 335ba6642145..528be116184e 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -36,6 +36,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
u32, u32, u32);
static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
+static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 subcmd, u8 *addr);
static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
/**
@@ -66,6 +67,8 @@ static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_vf;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+ /* set mac filter */
+ mac->ops.set_uc_addr = e1000_set_uc_addr_vf;
/* set vlan filter table array */
mac->ops.set_vfta = e1000_set_vfta_vf;
@@ -338,6 +341,44 @@ static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
}
/**
+ * e1000_set_uc_addr_vf - Set or clear unicast filters
+ * @hw: pointer to the HW structure
+ * @sub_cmd: add or clear filters
+ * @addr: pointer to the filter MAC address
+ **/
+static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 sub_cmd, u8 *addr)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3], msgbuf_chk;
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] |= sub_cmd;
+ msgbuf[0] |= E1000_VF_SET_MAC_ADDR;
+ msgbuf_chk = msgbuf[0];
+
+ if (addr)
+ memcpy(msg_addr, addr, ETH_ALEN);
+
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+ if (!ret_val) {
+ if (msgbuf[0] == (msgbuf_chk | E1000_VT_MSGTYPE_NACK))
+ return -ENOSPC;
+ }
+
+ return ret_val;
+}
+
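On the VF side the request word is saved in msgbuf_chk before posting; if the PF's echo equals that word with the NACK type bit set, the add is treated as out of space. A sketch of the comparison, with an assumed value for the NACK bit:

#include <stdio.h>
#include <stdint.h>

#define MSG_NACK 0x40000000u /* assumed stand-in for E1000_VT_MSGTYPE_NACK */

int main(void)
{
	uint32_t msgbuf_chk = 0x00020002u;      /* request saved before posting */
	uint32_t reply = msgbuf_chk | MSG_NACK; /* PF rejected the filter */

	if (reply == (msgbuf_chk | MSG_NACK))
		printf("PF NACKed the filter: treat as -ENOSPC\n");
	return 0;
}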
+/**
* e1000_check_for_link_vf - Check for link for a virtual interface
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index f00a41d9a1ca..4cf78b0dec50 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -179,6 +179,7 @@ struct e1000_mac_operations {
s32 (*get_bus_info)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ s32 (*set_uc_addr)(struct e1000_hw *, u32, u8 *);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index e5d72559cca9..d10a0d242dda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -94,24 +94,30 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
static int
-ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
if (netif_carrier_ok(adapter->netdev)) {
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -126,13 +132,14 @@ void ixgb_set_speed_duplex(struct net_device *netdev)
}
static int
-ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
- if (ecmd->autoneg == AUTONEG_ENABLE ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
if (netif_running(adapter->netdev)) {
@@ -630,8 +637,6 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static const struct ethtool_ops ixgb_ethtool_ops = {
- .get_settings = ixgb_get_settings,
- .set_settings = ixgb_set_settings,
.get_drvinfo = ixgb_get_drvinfo,
.get_regs_len = ixgb_get_regs_len,
.get_regs = ixgb_get_regs,
@@ -649,6 +654,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
.set_phys_id = ixgb_set_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
+ .get_link_ksettings = ixgb_get_link_ksettings,
+ .set_link_ksettings = ixgb_set_link_ksettings,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fbd220d137b3..5a713199653c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -86,7 +86,6 @@ static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
-static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
@@ -367,7 +366,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
.ndo_open = ixgb_open,
.ndo_stop = ixgb_close,
.ndo_start_xmit = ixgb_xmit_frame,
- .ndo_get_stats = ixgb_get_stats,
.ndo_set_rx_mode = ixgb_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgb_set_mac,
@@ -1597,20 +1595,6 @@ ixgb_tx_timeout_task(struct work_struct *work)
}
/**
- * ixgb_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-
-static struct net_device_stats *
-ixgb_get_stats(struct net_device *netdev)
-{
- return &netdev->stats;
-}
-
-/**
* ixgb_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b1ecc2627a5a..76263762bea1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -86,17 +86,62 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
+#define IXGBE_RXBUFFER_1536 1536
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
-#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the 3K
+ * buffers.
+ */
#if (PAGE_SIZE < 8192)
-#define IXGBE_MAX_FRAME_BUILD_SKB \
- (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
+
+static inline int ixgbe_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int ixgbe_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = IXGBE_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ixgbe_compute_pad(rx_buf_len);
+}
+
+#define IXGBE_SKB_PAD ixgbe_skb_pad()
#else
-#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
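ixgbe_compute_pad() turns whatever is left of a half page, after the buffer and the shared-info overhead, into headroom. A small check of the arithmetic, with a stand-in shared-info size:

#include <stdio.h>

#define HALF_PAGE 2048
#define SHINFO    320 /* stand-in for the skb_shared_info overhead */

int main(void)
{
	int rx_buf_len = 1536 - 2;  /* IXGBE_RXBUFFER_1536 - NET_IP_ALIGN */
	int page_size = HALF_PAGE;  /* ALIGN(rx_buf_len, PAGE_SIZE / 2) */
	int pad = (page_size - SHINFO) - rx_buf_len;

	printf("headroom = %d bytes\n", pad); /* 2048 - 320 - 1534 = 194 */
	return 0;
}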
/*
@@ -190,7 +235,11 @@ struct vf_macvlans {
struct ixgbe_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP uses address ptr on irq_clean */
+ void *data;
+ };
unsigned int bytecount;
unsigned short gso_segs;
__be16 protocol;
@@ -243,6 +292,7 @@ enum ixgbe_ring_state_t {
__IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_TX_XDP_RING,
};
#define ring_uses_build_skb(ring) \
@@ -269,10 +319,17 @@ struct ixgbe_fwd_adapter {
set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define ring_is_xdp(ring) \
+ test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+#define set_ring_xdp(ring) \
+ set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+#define clear_ring_xdp(ring) \
+ clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
struct ixgbe_ring {
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
+ struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
@@ -334,6 +391,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 31
@@ -361,7 +419,7 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
if (ring_uses_build_skb(ring))
- return IXGBE_MAX_FRAME_BUILD_SKB;
+ return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
return IXGBE_RXBUFFER_2K;
}
@@ -510,6 +568,7 @@ struct ixgbe_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/* OS defined structs */
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
unsigned long state;
@@ -576,6 +635,10 @@ struct ixgbe_adapter {
__be16 vxlan_port;
__be16 geneve_port;
+ /* XDP */
+ int num_xdp_queues;
+ struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -622,6 +685,7 @@ struct ixgbe_adapter {
u64 tx_busy;
unsigned int tx_ring_count;
+ unsigned int xdp_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
@@ -705,7 +769,7 @@ struct ixgbe_adapter {
u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
- u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+ u32 *rss_key;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -762,6 +826,7 @@ enum ixgbe_boards {
board_X540,
board_X550,
board_X550EM_x,
+ board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
};
@@ -771,6 +836,7 @@ extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
@@ -790,7 +856,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
-int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 90fa5bf23d1b..7e5e336d7dcc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -179,6 +179,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
return SUPPORTED_10000baseKR_Full;
default:
return SUPPORTED_10000baseKX4_Full |
@@ -186,60 +187,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
}
}
-static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
+ supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+ supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
SUPPORTED_1000baseKX_Full :
SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_10_FULL)
- ecmd->supported |= SUPPORTED_10baseT_Full;
+ supported |= SUPPORTED_10baseT_Full;
/* default advertised speed if phy.autoneg_advertised isn't set */
- ecmd->advertising = ecmd->supported;
+ advertising = supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
- ecmd->advertising = 0;
+ advertising = 0;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
+ advertising |= supported & ADVRTSD_MSK_10G;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
- if (ecmd->supported & SUPPORTED_1000baseKX_Full)
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ if (supported & SUPPORTED_1000baseKX_Full)
+ advertising |= ADVERTISED_1000baseKX_Full;
else
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
} else {
if (hw->phy.multispeed_fiber && !autoneg) {
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
}
}
if (autoneg) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else
- ecmd->autoneg = AUTONEG_DISABLE;
-
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
@@ -248,14 +251,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_phy_qt:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
case ixgbe_phy_sfp_passive_tyco:
@@ -273,9 +276,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
@@ -285,102 +288,113 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_sfp_type_unknown:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
/* Indicate pause support */
- ecmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case ixgbe_fc_rx_pause:
- ecmd->advertising |= ADVERTISED_Pause |
+ advertising |= ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
break;
case ixgbe_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
+ advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
}
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case IXGBE_LINK_SPEED_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_5000);
+ cmd->base.speed = SPEED_5000;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_2500);
+ cmd->base.speed = SPEED_2500;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case IXGBE_LINK_SPEED_100_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case IXGBE_LINK_SPEED_10_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
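The conversion above is the standard get_settings to get_link_ksettings migration: the driver still assembles legacy u32 SUPPORTED_/ADVERTISED_ masks and only converts to the wider link-mode bitmaps on the way out. As a rough userspace illustration of what ethtool_convert_legacy_u32_to_link_mode() amounts to (the bitmap width and bit positions here are illustrative assumptions):

    #include <stdio.h>
    #include <string.h>

    #define LINK_MODE_NBITS 92  /* illustrative; the kernel bitmap exceeds 32 bits */
    #define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
    #define BITMAP_WORDS    ((LINK_MODE_NBITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Hypothetical helper mirroring what the kernel conversion boils down to:
     * the 32 legacy bits occupy the low end of the multi-word bitmap. */
    static void legacy_u32_to_link_mode(unsigned long *dst, unsigned int legacy)
    {
        memset(dst, 0, BITMAP_WORDS * sizeof(*dst));
        dst[0] = legacy;
    }

    int main(void)
    {
        unsigned long modes[BITMAP_WORDS];
        unsigned int supported = (1u << 6) | (1u << 12); /* e.g. Autoneg | 10G-T */

        legacy_u32_to_link_mode(modes, supported);
        printf("link-mode word 0 = 0x%lx\n", modes[0]);
        return 0;
    }

The reverse helper copies word 0 back out, reporting via its return value whether anything beyond the legacy 32 bits was dropped, which is why ixgbe_set_link_ksettings() below can keep operating on plain u32 masks.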
-static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
s32 err = 0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -388,12 +402,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->advertising & ~ecmd->supported)
+ if (advertising & ~supported)
return -EINVAL;
/* only allow one speed at a time if no autoneg */
- if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
- if (ecmd->advertising ==
+ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
+ if (advertising ==
(ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full))
return -EINVAL;
@@ -401,16 +415,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
old = hw->phy.autoneg_advertised;
advertised = 0;
- if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+ if (advertising & ADVERTISED_10000baseT_Full)
advertised |= IXGBE_LINK_SPEED_10GB_FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ if (advertising & ADVERTISED_1000baseT_Full)
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
advertised |= IXGBE_LINK_SPEED_100_FULL;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
advertised |= IXGBE_LINK_SPEED_10_FULL;
if (old == advertised)
@@ -428,10 +442,11 @@ static int ixgbe_set_settings(struct net_device *netdev,
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
} else {
/* in this case we currently only support 10Gb/FULL */
- u32 speed = ethtool_cmd_speed(ecmd);
- if ((ecmd->autoneg == AUTONEG_ENABLE) ||
- (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ u32 speed = cmd->base.speed;
+
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+ (advertising != ADVERTISED_10000baseT_Full) ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -1056,15 +1071,19 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ adapter->xdp_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
+ adapter->xdp_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+ i = max_t(int, i, adapter->num_xdp_queues);
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
if (!temp_ring) {
@@ -1096,12 +1115,33 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ memcpy(&temp_ring[i], adapter->xdp_ring[i],
+ sizeof(struct ixgbe_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_tx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
for (i = 0; i < adapter->num_tx_queues; i++) {
ixgbe_free_tx_resources(adapter->tx_ring[i]);
memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+
+ memcpy(adapter->xdp_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
@@ -1113,7 +1153,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
sizeof(struct ixgbe_ring));
temp_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(&temp_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
if (err) {
while (i) {
i--;
@@ -1746,7 +1786,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
- err = ixgbe_setup_rx_resources(rx_ring);
+ err = ixgbe_setup_rx_resources(adapter, rx_ring);
if (err) {
ret_val = 4;
goto err_nomem;
@@ -2927,9 +2967,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- return sizeof(adapter->rss_key);
+ return IXGBE_RSS_KEY_SIZE;
}
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
@@ -3402,8 +3440,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
- .get_settings = ixgbe_get_settings,
- .set_settings = ixgbe_set_settings,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
@@ -3442,6 +3478,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 1b8be7d813bd..b45fdc98033d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -267,12 +267,14 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
**/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
- int i;
+ int i, reg_idx;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
+ for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
+ adapter->xdp_ring[i]->reg_idx = reg_idx;
return true;
}
@@ -308,6 +310,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_rss(adapter);
}
+static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
+{
+ return adapter->xdp_prog ? nr_cpu_ids : 0;
+}
+
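The count above is the central design choice for XDP_TX support: with one dedicated TX ring per possible CPU, the transmit path can index adapter->xdp_ring[] by smp_processor_id() and never contend on a lock. A hedged userspace illustration of the sizing rule (sysconf standing in for nr_cpu_ids):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_CONF); /* stand-in for nr_cpu_ids */
        int prog_attached = 1;                      /* assume a program is loaded */

        /* Mirrors ixgbe_xdp_queues(): one XDP TX ring per CPU, else none. */
        printf("XDP TX rings: %ld\n", prog_attached ? ncpus : 0L);
        return 0;
    }

ixgbe_xdp_setup() further down refuses to attach a program when nr_cpu_ids exceeds MAX_XDP_QUEUES, so this allocation can never overrun the xdp_ring array.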
#define IXGBE_RSS_64Q_MASK 0x3F
#define IXGBE_RSS_16Q_MASK 0xF
#define IXGBE_RSS_8Q_MASK 0x7
@@ -382,6 +389,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues_per_pool = tcs;
adapter->num_tx_queues = vmdq_i * tcs;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = vmdq_i * tcs;
#ifdef IXGBE_FCOE
@@ -479,6 +487,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
adapter->num_tx_queues = rss_i * tcs;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = rss_i * tcs;
return true;
@@ -549,6 +558,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = vmdq_i * rss_i;
adapter->num_tx_queues = vmdq_i * rss_i;
+ adapter->num_xdp_queues = 0;
/* disable ATR as it is not supported when VMDq is enabled */
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -669,6 +679,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
adapter->num_rx_queues = rss_i;
adapter->num_tx_queues = rss_i;
+ adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
return true;
}
@@ -689,6 +700,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
@@ -719,8 +731,11 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i, vectors, vector_threshold;
- /* We start by asking for one vector per queue pair */
+ /* We start by asking for one vector per queue pair with XDP queues
+ * being stacked with TX queues.
+ */
vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ vectors = max(vectors, adapter->num_xdp_queues);
/* It is easy to be greedy for MSI-X vectors. However, it really
* doesn't do much good if we have a lot more vectors than CPUs. We'll
@@ -800,6 +815,8 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
* @v_idx: index of vector in adapter struct
* @txr_count: total number of Tx rings to allocate
* @txr_idx: index of first Tx ring to allocate
+ * @xdp_count: total number of XDP rings to allocate
+ * @xdp_idx: index of first XDP ring to allocate
* @rxr_count: total number of Rx rings to allocate
* @rxr_idx: index of first Rx ring to allocate
*
@@ -808,6 +825,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int v_count, int v_idx,
int txr_count, int txr_idx,
+ int xdp_count, int xdp_idx,
int rxr_count, int rxr_idx)
{
struct ixgbe_q_vector *q_vector;
@@ -817,7 +835,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int ring_count, size;
u8 tcs = netdev_get_num_tc(adapter->netdev);
- ring_count = txr_count + rxr_count;
+ ring_count = txr_count + rxr_count + xdp_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
@@ -909,6 +927,33 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring++;
}
+ while (xdp_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbe_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = xdp_idx;
+ set_ring_xdp(ring);
+
+ /* assign ring to adapter */
+ adapter->xdp_ring[xdp_idx] = ring;
+
+ /* update count and index */
+ xdp_count--;
+ xdp_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
while (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
@@ -1002,17 +1047,18 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
int q_vectors = adapter->num_q_vectors;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
- int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int xdp_remaining = adapter->num_xdp_queues;
+ int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
int err;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
q_vectors = 1;
- if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
for (; rxr_remaining; v_idx++) {
err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
- 0, 0, 1, rxr_idx);
+ 0, 0, 0, 0, 1, rxr_idx);
if (err)
goto err_out;
@@ -1026,8 +1072,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
+
err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx,
+ xqpv, xdp_idx,
rqpv, rxr_idx);
if (err)
@@ -1036,14 +1085,17 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
/* update counts and index */
rxr_remaining -= rqpv;
txr_remaining -= tqpv;
+ xdp_remaining -= xqpv;
rxr_idx++;
txr_idx++;
+ xdp_idx += xqpv;
}
return 0;
err_out:
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
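The DIV_ROUND_UP spreading above hands each remaining vector a fair share of the leftover rings, so counts stay balanced even when they do not divide evenly. A quick hedged demo with made-up counts (9 Rx, 9 Tx and 4 XDP rings over 8 vectors):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int q_vectors = 8, rxr = 9, txr = 9, xdp = 4; /* illustrative counts */

        for (int v = 0; v < q_vectors; v++) {
            int rqpv = DIV_ROUND_UP(rxr, q_vectors - v);
            int tqpv = DIV_ROUND_UP(txr, q_vectors - v);
            int xqpv = DIV_ROUND_UP(xdp, q_vectors - v);

            printf("vector %d: rx=%d tx=%d xdp=%d\n", v, rqpv, tqpv, xqpv);
            rxr -= rqpv;
            txr -= tqpv;
            xdp -= xqpv;
        }
        return 0;
    }

The first vector absorbs the remainder (two Rx rings here) and the four XDP rings land on the first four vectors, matching how xdp_idx advances by xqpv per vector in the loop above.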
@@ -1066,6 +1118,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
int v_idx = adapter->num_q_vectors;
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
@@ -1172,9 +1225,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_register(adapter);
- e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+ e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
- adapter->num_rx_queues, adapter->num_tx_queues);
+ adapter->num_rx_queues, adapter->num_tx_queues,
+ adapter->num_xdp_queues);
set_bit(__IXGBE_DOWN, &adapter->state);
@@ -1195,6 +1249,7 @@ err_alloc_q_vectors:
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
ixgbe_free_q_vectors(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a7a430a7be2c..22a29df1d29e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -49,6 +49,9 @@
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/atomic.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
@@ -85,6 +88,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
+ [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};
@@ -131,9 +135,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
@@ -508,7 +514,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
*/
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
- int i = 0, j = 0;
+ int i;
char rname[16];
u32 regs[64];
@@ -570,21 +576,38 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
- pr_info("%-15s %08x\n", reginfo->name,
- IXGBE_READ_REG(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
- for (i = 0; i < 8; i++) {
- snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
- pr_err("%-15s", rname);
+ i = 0;
+ while (i < 64) {
+ int j;
+ char buf[9 * 8 + 1];
+ char *p = buf;
+
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
for (j = 0; j < 8; j++)
- pr_cont(" %08x", regs[i*8+j]);
- pr_cont("\n");
+ p += sprintf(p, " %08x", regs[i++]);
+ pr_err("%-15s%s\n", rname, buf);
}
}
+static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
+{
+ struct ixgbe_tx_buffer *tx_buffer;
+
+ tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
+ pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
+ n, ring->next_to_use, ring->next_to_clean,
+ (u64)dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ tx_buffer->next_to_watch,
+ (u64)tx_buffer->time_stamp);
+}
+
/*
* ixgbe_dump - Print registers, tx-rings and rx-rings
*/
@@ -594,14 +617,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_reg_info *reginfo;
int n = 0;
- struct ixgbe_ring *tx_ring;
+ struct ixgbe_ring *ring;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
struct my_u0 { u64 a; u64 b; } *u0;
struct ixgbe_ring *rx_ring;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer_info;
- u32 staterr;
int i = 0;
if (!netif_msg_hw(adapter))
@@ -635,14 +657,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
"Queue [NTU] [NTC] [bi(ntc)->dma ]",
"leng", "ntw", "timestamp");
for (n = 0; n < adapter->num_tx_queues; n++) {
- tx_ring = adapter->tx_ring[n];
- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- tx_buffer->next_to_watch,
- (u64)tx_buffer->time_stamp);
+ ring = adapter->tx_ring[n];
+ ixgbe_print_buffer(ring, n);
+ }
+
+ for (n = 0; n < adapter->num_xdp_queues; n++) {
+ ring = adapter->xdp_ring[n];
+ ixgbe_print_buffer(ring, n);
}
/* Print TX Rings */
@@ -687,21 +708,32 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
- tx_ring = adapter->tx_ring[n];
+ ring = adapter->tx_ring[n];
pr_info("------------------------------------\n");
- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
pr_info("------------------------------------\n");
pr_info("%s%s %s %s %s %s\n",
"T [desc] [address 63:0 ] ",
"[PlPOIdStDDt Ln] [bi->dma ] ",
"leng", "ntw", "timestamp", "bi->skb");
- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = IXGBE_TX_DESC(tx_ring, i);
- tx_buffer = &tx_ring->tx_buffer_info[i];
+ for (i = 0; ring->desc && (i < ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC(ring, i);
+ tx_buffer = &ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (dma_unmap_len(tx_buffer, len) > 0) {
- pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
+ const char *ring_desc;
+
+ if (i == ring->next_to_use &&
+ i == ring->next_to_clean)
+ ring_desc = " NTC/U";
+ else if (i == ring->next_to_use)
+ ring_desc = " NTU";
+ else if (i == ring->next_to_clean)
+ ring_desc = " NTC";
+ else
+ ring_desc = "";
+ pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -709,16 +741,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
(u64)tx_buffer->time_stamp,
- tx_buffer->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- pr_cont(" NTC/U\n");
- else if (i == tx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == tx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
+ tx_buffer->skb,
+ ring_desc);
if (netif_msg_pktdata(adapter) &&
tx_buffer->skb)
@@ -797,34 +821,44 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("%s%s%s",
+ pr_info("%s%s%s\n",
"R [desc] [ PktBuf A0] ",
"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
- "<-- Adv Rx Read format\n");
- pr_info("%s%s%s",
+ "<-- Adv Rx Read format");
+ pr_info("%s%s%s\n",
"RWB[desc] [PcsmIpSHl PtRs] ",
"[vl er S cks ln] ---------------- [bi->skb ] ",
- "<-- Adv Rx Write-Back format\n");
+ "<-- Adv Rx Write-Back format");
for (i = 0; i < rx_ring->count; i++) {
+ const char *ring_desc;
+
+ if (i == rx_ring->next_to_use)
+ ring_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ ring_desc = " NTC";
+ else
+ ring_desc = "";
+
rx_buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IXGBE_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- if (staterr & IXGBE_RXD_STAT_DD) {
+ if (rx_desc->wb.upper.length) {
/* Descriptor Done */
- pr_info("RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- rx_buffer_info->skb);
+ rx_buffer_info->skb,
+ ring_desc);
} else {
- pr_info("R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
+ i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)rx_buffer_info->dma,
- rx_buffer_info->skb);
+ rx_buffer_info->skb,
+ ring_desc);
if (netif_msg_pktdata(adapter) &&
rx_buffer_info->dma) {
@@ -835,14 +869,6 @@ rx_ring_summary:
ixgbe_rx_bufsz(rx_ring), true);
}
}
-
- if (i == rx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == rx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
-
}
}
}
@@ -972,6 +998,10 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
+
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->xdp_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1016,6 +1046,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
+
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+
+ tc = xdp_ring->dcb_tc;
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
+ }
}
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
@@ -1167,7 +1205,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buffer->data);
+ else
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1228,7 +1269,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang\n"
+ e_err(drv, "Detected Tx Unit Hang %s\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
@@ -1236,13 +1277,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
"tx_buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
+ ring_is_xdp(tx_ring) ? "(XDP)" : "",
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, i,
tx_ring->tx_buffer_info[i].time_stamp, jiffies);
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ if (!ring_is_xdp(tx_ring))
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
e_info(probe,
"tx hang %d detected on queue %d, resetting adapter\n",
@@ -1255,6 +1299,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -1846,6 +1893,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed
*
+ * Check if the skb is valid: in the XDP case it will be an error pointer.
+ * Return true in this case to abort processing and advance to next
+ * descriptor.
+ *
* Check for corrupted packet headers caused by senders on the local L2
* embedded NIC switch not setting up their Tx Descriptors right. These
* should be very rare.
@@ -1864,6 +1915,10 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
/* verify that the packet does not have any known errors */
if (unlikely(ixgbe_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
@@ -2039,7 +2094,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
@@ -2060,21 +2115,22 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union ixgbe_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
@@ -2087,7 +2143,7 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
IXGBE_CB(skb)->dma = rx_buffer->dma;
skb_add_rx_frag(skb, 0, rx_buffer->page,
- rx_buffer->page_offset,
+ xdp->data - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
@@ -2095,7 +2151,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset += truesize;
#endif
} else {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ memcpy(__skb_put(skb, size),
+ xdp->data, ALIGN(size, sizeof(long)));
rx_buffer->pagecnt_bias++;
}
@@ -2104,32 +2161,32 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union ixgbe_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
- skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IXGBE_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
/* record DMA address if this is the start of a chain of buffers */
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
@@ -2145,6 +2202,65 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED 1
+#define IXGBE_XDP_TX 2
+
+static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_buff *xdp);
+
+static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int result = IXGBE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = ixgbe_xmit_xdp_ring(adapter, xdp);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
+
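ixgbe_run_xdp() is the per-frame hook where an attached program picks one of the arms above. For orientation, here is a minimal sketch of such a program; this is generic XDP C, not part of this patch, and it simply exercises the XDP_PASS arm:

    /* Build with: clang -O2 -target bpf -c xdp_pass.c -o xdp_pass.o */
    #include <linux/bpf.h>

    #define SEC(name) __attribute__((section(name), used))

    SEC("xdp")
    int xdp_pass_all(struct xdp_md *ctx)
    {
        return XDP_PASS;   /* hand every frame on to the normal stack */
    }

    char _license[] SEC("license") = "GPL";

Attached with something like "ip link set dev eth0 xdp obj xdp_pass.o sec xdp", loading such a program is what makes adapter->xdp_prog non-NULL and, via ixgbe_xdp_setup() below, brings the per-CPU XDP rings online.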
+static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
/**
* ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @q_vector: structure containing interrupt and ring information
@@ -2163,17 +2279,19 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter = q_vector->adapter;
+#ifdef IXGBE_FCOE
int ddp_bytes;
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ bool xdp_xmit = false;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ struct xdp_buff xdp;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2196,14 +2314,34 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_hard_start = xdp.data -
+ ixgbe_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+
+ skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
+ xdp_xmit = true;
+ ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_packets++;
+ total_rx_bytes += size;
+ } else if (skb) {
ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
+ } else if (ring_uses_build_skb(rx_ring)) {
skb = ixgbe_build_skb(rx_ring, rx_buffer,
- rx_desc, size);
- else
+ &xdp, rx_desc);
+ } else {
skb = ixgbe_construct_skb(rx_ring, rx_buffer,
- rx_desc, size);
+ &xdp, rx_desc);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2260,6 +2398,16 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_packets++;
}
+ if (xdp_xmit) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -3364,6 +3512,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
@@ -3489,6 +3639,28 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_init_rss_key - Initialize adapter RSS key
+ * @adapter: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
+{
+ u32 *rss_key;
+
+ if (!adapter->rss_key) {
+ rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
+ adapter->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_store_reta - Write the RETA table to HW
* @adapter: device handle
*
@@ -3590,7 +3762,7 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- adapter->rss_key[i]);
+ *(adapter->rss_key + i));
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3651,7 +3823,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
- netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
unsigned int pf_pool = adapter->num_vfs;
@@ -3802,7 +3973,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* Limit the maximum frame size so we don't overrun the skb */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
- rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+ rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
IXGBE_RXDCTL_RLPML_EN;
#endif
}
@@ -3972,8 +4143,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
- if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
- (max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
+ if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
}
@@ -5505,7 +5676,10 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buffer->data);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -5546,7 +5720,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
}
/* reset BQL for queue */
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (!ring_is_xdp(tx_ring))
+ netdev_tx_reset_queue(txring_txq(tx_ring));
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
@@ -5575,6 +5750,8 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbe_clean_tx_ring(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
}
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
@@ -5669,6 +5846,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
u8 reg_idx = adapter->tx_ring[i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
/* Disable the Tx DMA engine on 82599 and later MAC */
switch (hw->mac.type) {
@@ -5854,6 +6036,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (!adapter->mac_table)
return -ENOMEM;
+ if (ixgbe_init_rss_key(adapter))
+ return -ENOMEM;
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5944,10 +6129,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
- adapter->num_vfs = 0;
+ max_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
- } else {
- adapter->num_vfs = max_vfs;
}
}
#endif /* CONFIG_PCI_IOV */
@@ -6041,7 +6224,7 @@ err:
**/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
- int i, err = 0;
+ int i, j = 0, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
@@ -6051,10 +6234,20 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
goto err_setup_tx;
}
+ for (j = 0; j < adapter->num_xdp_queues; j++) {
+ err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
+ if (!err)
+ continue;
+
+ e_err(probe, "Allocation for Tx Queue %u failed\n", j);
+ goto err_setup_tx;
+ }
return 0;
err_setup_tx:
/* rewind the index freeing the rings as we go */
+ while (j--)
+ ixgbe_free_tx_resources(adapter->xdp_ring[j]);
while (i--)
ixgbe_free_tx_resources(adapter->tx_ring[i]);
return err;
@@ -6066,7 +6259,8 @@ err_setup_tx:
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
@@ -6105,6 +6299,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = adapter->xdp_prog;
+
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -6128,7 +6324,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
@@ -6184,6 +6380,9 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
ixgbe_free_tx_resources(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ if (adapter->xdp_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter->xdp_ring[i]);
}
/**
@@ -6196,6 +6395,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
ixgbe_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -6602,6 +6802,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+
+ restart_queue += xdp_ring->tx_stats.restart_queue;
+ tx_busy += xdp_ring->tx_stats.tx_busy;
+ bytes += xdp_ring->stats.bytes;
+ packets += xdp_ring->stats.packets;
+ }
adapter->restart_queue = restart_queue;
adapter->tx_busy = tx_busy;
netdev->stats.tx_bytes = bytes;
@@ -6795,6 +7003,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_TX_FDIR_INIT_DONE,
&(adapter->tx_ring[i]->state));
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &adapter->xdp_ring[i]->state);
/* re-enable flow director interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
} else {
@@ -6828,6 +7039,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
if (netif_carrier_ok(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ set_check_for_tx_hang(adapter->xdp_ring[i]);
}
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
@@ -7058,6 +7271,13 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
return true;
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+
+ if (ring->next_to_use != ring->next_to_clean)
+ return true;
+ }
+
return false;
}
@@ -8015,6 +8235,62 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
#endif
}
+static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_buff *xdp)
+{
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+ u16 i;
+
+ len = xdp->data_end - xdp->data;
+
+ if (unlikely(!ixgbe_desc_unused(ring)))
+ return IXGBE_XDP_CONSUMED;
+
+ dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma))
+ return IXGBE_XDP_CONSUMED;
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ i = ring->next_to_use;
+ tx_desc = IXGBE_TX_DESC(ring, i);
+
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+ tx_buffer->data = xdp->data;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ ring->next_to_use = i;
+
+ return IXGBE_XDP_TX;
+}
+
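The single ixgbe_desc_unused() check above is the entire flow control for XDP_TX: these rings carry no BQL accounting and are never stopped, so frames are simply consumed when the ring is full. A hedged sketch of the usual one-slot-kept-empty computation behind such helpers (illustrative, not necessarily the verbatim kernel expression):

    #include <stdio.h>

    /* Classic ring accounting: next_to_use (ntu) chases next_to_clean (ntc),
     * and one descriptor is always kept unused so that ntu == ntc can
     * unambiguously mean "empty" rather than "full". */
    static unsigned int desc_unused(unsigned int count,
                                    unsigned int ntc, unsigned int ntu)
    {
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    int main(void)
    {
        printf("empty ring:  %u free\n", desc_unused(512, 0, 0)); /* 511 */
        printf("full ring:   %u free\n", desc_unused(512, 5, 4)); /* 0 */
        return 0;
    }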
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -8306,6 +8582,23 @@ static void ixgbe_netpoll(struct net_device *netdev)
#endif
+static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
+ struct ixgbe_ring *ring)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+}
+
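The helper factored out above wraps the usual u64_stats seqcount dance: snapshot the counters, then retry if a writer bumped the sequence mid-read. A hedged, single-threaded userspace sketch of the retry shape (the kernel primitives add the needed barriers, and on 64-bit builds reduce to plain loads):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ring_stats {
        atomic_uint seq;             /* odd while a writer is mid-update */
        unsigned long long packets, bytes;
    };

    static unsigned int fetch_begin(struct ring_stats *s)
    {
        unsigned int seq;

        while ((seq = atomic_load(&s->seq)) & 1)
            ;                        /* spin past an active writer */
        return seq;
    }

    static bool fetch_retry(struct ring_stats *s, unsigned int seq)
    {
        return atomic_load(&s->seq) != seq;
    }

    int main(void)
    {
        struct ring_stats s = { .packets = 42, .bytes = 42 * 64 };
        unsigned long long p, b;
        unsigned int seq;

        do {                         /* same shape as the loop above */
            seq = fetch_begin(&s);
            p = s.packets;
            b = s.bytes;
        } while (fetch_retry(&s, seq));

        printf("tx_packets=%llu tx_bytes=%llu\n", p, b);
        return 0;
    }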
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -8331,18 +8624,13 @@ static void ixgbe_get_stats64(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
- u64 bytes, packets;
- unsigned int start;
- if (ring) {
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
- }
+ ixgbe_get_ring_stats64(stats, ring);
+ }
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+
+ ixgbe_get_ring_stats64(stats, ring);
}
rcu_read_unlock();
@@ -8948,7 +9236,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return ixgbe_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return ixgbe_setup_tc(dev, tc->mqprio->num_tc);
}
#ifdef CONFIG_PCI_IOV
@@ -9459,6 +9749,68 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return -EINVAL;
+
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ return -EINVAL;
+
+ /* verify ixgbe ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring_is_rsc_enabled(ring))
+ return -EINVAL;
+
+ if (frame_size > ixgbe_rx_bufsz(ring))
+ return -EINVAL;
+ }
+
+ if (nr_cpu_ids > MAX_XDP_QUEUES)
+ return -ENOMEM;
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+
+ /* If transitioning XDP modes reconfigure rings */
+ if (!!prog != !!old_prog) {
+ int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+
+ if (err) {
+ rcu_assign_pointer(adapter->xdp_prog, old_prog);
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ixgbe_xdp_setup(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!(adapter->xdp_prog);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -9504,6 +9856,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
+ .ndo_xdp = ixgbe_xdp,
};
/**
@@ -9808,7 +10161,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbe_init_mbx_params_pf(hw);
hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
- ixgbe_enable_sriov(adapter);
+ ixgbe_enable_sriov(adapter, max_vfs);
skip_sriov:
#endif
@@ -9934,6 +10287,9 @@ skip_sriov:
if (err)
goto err_sw_init;
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ u64_stats_init(&adapter->xdp_ring[i]->syncp);
+
/* WOL not supported for all devices */
adapter->wol = 0;
hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
@@ -10059,6 +10415,7 @@ err_sw_init:
iounmap(adapter->io_addr);
kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
+ kfree(adapter->rss_key);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -10143,6 +10500,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
}
kfree(adapter->mac_table);
+ kfree(adapter->rss_key);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e55b2602f371..654a402f0e9e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -792,7 +792,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
/* Setup link based on the new speed settings */
- hw->phy.ops.setup_link(hw);
+ if (hw->phy.ops.setup_link)
+ hw->phy.ops.setup_link(hw);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 044cb44747cf..8baf298a8516 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -46,88 +46,96 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ unsigned int num_vfs)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
-
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
- e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
-
- /* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
- adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+ int num_vf_macvlans, i;
num_vf_macvlans = hw->mac.num_rar_entries -
- (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+ (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return;
- adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
- sizeof(struct vf_macvlans),
- GFP_KERNEL);
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
- mv_list->vf = -1;
- mv_list->free = true;
- list_add(&mv_list->l, &adapter->vf_mvs.l);
- mv_list++;
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].l, &adapter->vf_mvs.l);
}
+ adapter->mv_list = mv_list;
}
+}
+
+static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ unsigned int num_vfs)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ if (!adapter->ring_feature[RING_F_VMDQ].limit)
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+ /* Allocate memory for per VF control structures */
+ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!adapter->vfinfo)
+ return -ENOMEM;
+
+ adapter->num_vfs = num_vfs;
+
+ ixgbe_alloc_vf_macvlans(adapter, num_vfs);
+ adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
/* Initialize default switching mode VEB */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* If call to enable VFs succeeded then allocate memory
- * for per VF control structures.
- */
- adapter->vfinfo =
- kcalloc(adapter->num_vfs,
- sizeof(struct vf_data_storage), GFP_KERNEL);
- if (adapter->vfinfo) {
- /* limit trafffic classes based on VFs enabled */
- if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (adapter->num_vfs < 16)) {
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- } else if (adapter->num_vfs < 32) {
- adapter->dcb_cfg.num_tcs.pg_tcs = 4;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
- } else {
- adapter->dcb_cfg.num_tcs.pg_tcs = 1;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
- }
-
- /* Disable RSC when in SR-IOV mode */
- adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
- IXGBE_FLAG2_RSC_ENABLED);
+ /* limit traffic classes based on VFs enabled */
+ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ } else if (num_vfs < 32) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ } else {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+ }
- for (i = 0; i < adapter->num_vfs; i++) {
- /* enable spoof checking for all VFs */
- adapter->vfinfo[i].spoofchk_enabled = true;
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
- /* We support VF RSS querying only for 82599 and x540
- * devices at the moment. These devices share RSS
- * indirection table and RSS hash key with PF therefore
- * we want to disable the querying by default.
- */
- adapter->vfinfo[i].rss_query_enabled = 0;
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ adapter->vfinfo[i].spoofchk_enabled = true;
- /* Untrust all VFs */
- adapter->vfinfo[i].trusted = false;
+ /* We support VF RSS querying only for 82599 and x540
+ * devices at the moment. These devices share RSS
+ * indirection table and RSS hash key with PF therefore
+ * we want to disable the querying by default.
+ */
+ adapter->vfinfo[i].rss_query_enabled = 0;
- /* set the default xcast mode */
- adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
- }
+ /* Untrust all VFs */
+ adapter->vfinfo[i].trusted = false;
- return 0;
+ /* set the default xcast mode */
+ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
- return -ENOMEM;
+ e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
+ return 0;
}
/**
@@ -165,12 +173,13 @@ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
/* Note this function is called when the user wants to enable SR-IOV
* VFs using the now deprecated module parameter
*/
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
int pre_existing_vfs = 0;
+ unsigned int num_vfs;
pre_existing_vfs = pci_num_vf(adapter->pdev);
- if (!pre_existing_vfs && !adapter->num_vfs)
+ if (!pre_existing_vfs && !max_vfs)
return;
/* If there are pre-existing VFs then we have to force
@@ -180,7 +189,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* have been created via the new PCI SR-IOV sysfs interface.
*/
if (pre_existing_vfs) {
- adapter->num_vfs = pre_existing_vfs;
+ num_vfs = pre_existing_vfs;
dev_warn(&adapter->pdev->dev,
"Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
} else {
@@ -192,17 +201,16 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
+ num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
if (err) {
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- adapter->num_vfs = 0;
return;
}
}
- if (!__ixgbe_enable_sriov(adapter)) {
+ if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
ixgbe_get_vfs(adapter);
return;
}
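As the comment above notes, the max_vfs module parameter is deprecated; the preferred way to create VFs is the PCI sysfs interface (the device's sriov_numvfs attribute), which reaches the driver through ixgbe_pci_sriov_enable() in the next hunk.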
@@ -298,6 +306,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err = 0;
+ u8 num_tc;
int i;
int pre_existing_vfs = pci_num_vf(dev);
@@ -310,23 +319,41 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
return err;
/* While the SR-IOV capability structure reports total VFs to be 64,
- * we have to limit the actual number allocated based on two factors.
+ * we limit the actual number allocated as below based on two factors.
+ * Num_TCs MAX_VFs
+ * 1 63
+ * <=4 31
+ * >4 15
* First, we reserve some transmit/receive resources for the PF.
* Second, VMDQ also uses the same pools that SR-IOV does. We need to
* account for this, so that we don't accidentally allocate more VFs
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
- return -EPERM;
+ num_tc = netdev_get_num_tc(adapter->netdev);
- adapter->num_vfs = num_vfs;
+ if (num_tc > 4) {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
+ e_dev_err("Currently the device is configured with %d TCs; creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
+ return -EPERM;
+ }
+ } else if ((num_tc > 1) && (num_tc <= 4)) {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
+ e_dev_err("Currently the device is configured with %d TCs; creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
+ return -EPERM;
+ }
+ } else {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
+ e_dev_err("Currently the device is configured with %d TCs; creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
+ return -EPERM;
+ }
+ }
- err = __ixgbe_enable_sriov(adapter);
+ err = __ixgbe_enable_sriov(adapter, num_vfs);
if (err)
return err;
- for (i = 0; i < adapter->num_vfs; i++)
+ for (i = 0; i < num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
/* reset before enabling SRIOV to avoid mailbox issues */
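The three-way cap check in the hunk above could equally be written as a small helper returning the applicable limit; an illustrative alternative only, not what the patch does:

    /* Illustrative alternative to the explicit chain above. */
    static unsigned int example_vf_cap(u8 num_tc)
    {
            if (num_tc > 4)
                    return IXGBE_MAX_VFS_8TC;
            if (num_tc > 1)
                    return IXGBE_MAX_VFS_4TC;
            return IXGBE_MAX_VFS_1TC;
    }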
@@ -650,58 +677,6 @@ update_vlvfb:
}
}
-static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- u8 num_tcs = netdev_get_num_tc(adapter->netdev);
-
- /* remove VLAN filters beloning to this VF */
- ixgbe_clear_vf_vlans(adapter, vf);
-
- /* add back PF assigned VLAN or VLAN 0 */
- ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
-
- /* reset offloads to defaults */
- ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
-
- /* set outgoing tags for VFs */
- if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
- ixgbe_clear_vmvir(adapter, vf);
- } else {
- if (vfinfo->pf_qos || !num_tcs)
- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
- vfinfo->pf_qos, vf);
- else
- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
- adapter->default_up, vf);
-
- if (vfinfo->spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- }
-
- /* reset multicast table array for vf */
- adapter->vfinfo[vf].num_vf_mc_hashes = 0;
-
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
-
- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
-
- /* reset VF api back to unknown */
- adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
-}
-
-static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
- int vf, unsigned char *mac_addr)
-{
- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
-
- return 0;
-}
-
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
@@ -757,6 +732,59 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
return 0;
}
+static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* remove VLAN filters belonging to this VF */
+ ixgbe_clear_vf_vlans(adapter, vf);
+
+ /* add back PF assigned VLAN or VLAN 0 */
+ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ ixgbe_clear_vmvir(adapter, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ adapter->default_up, vf);
+
+ if (vfinfo->spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ }
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);
+
+ /* reset VF api back to unknown */
+ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+}
+
+static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+
+ return 0;
+}
+
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -1085,7 +1113,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
return -EOPNOTSUPP;
}
- memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));
+ memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);
return 0;
}
@@ -1319,18 +1347,26 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (is_zero_ether_addr(mac)) {
+ adapter->vfinfo[vf].pf_set_mac = false;
+ dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
+ } else if (is_valid_ether_addr(mac)) {
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
+ mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
return -EINVAL;
- adapter->vfinfo[vf].pf_set_mac = true;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
- " change effective.");
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
- " attempting to use the VF device.\n");
}
+
return ixgbe_set_vf_mac(adapter, vf, mac);
}
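For context, ixgbe_ndo_set_vf_mac() is reached from rtnetlink when the administrator runs something like "ip link set <pf> vf <n> mac <addr>". A hedged sketch of the calling side (simplified; the real dispatch lives in net/core/rtnetlink.c, and the wrapper name is illustrative):

    static int example_set_vf_mac(struct net_device *dev,
                                  struct ifla_vf_mac *ivm)
    {
            const struct net_device_ops *ops = dev->netdev_ops;

            if (!ops->ndo_set_vf_mac)
                    return -EOPNOTSUPP;

            /* With this patch, an all-zero address clears the PF-set MAC. */
            return ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
    }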
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 0c7977d27b71..cf67b9b18ed7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -33,6 +33,9 @@
* 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VF_FUNCTIONS
+#define IXGBE_MAX_VFS_4TC 32
+#define IXGBE_MAX_VFS_8TC 16
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
@@ -56,7 +59,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
#endif
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 1d07f2ead914..9c2460c5ef1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -85,6 +85,7 @@
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
@@ -1387,9 +1388,6 @@ struct ixgbe_thermal_sensor_data {
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
-
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
#define IXGBE_PHY_INIT_END_NL 0xFFFF
@@ -3128,7 +3126,9 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
+ ixgbe_phy_ext_1g_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -3754,15 +3754,6 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
-#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
-
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3779,12 +3770,14 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
-#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21)
#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 84a467a8ed3d..6ea0d6a5fb90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_dbg(hw, "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
usleep_range(1000, 1200);
/* Poll for reset bit to self-clear indicating reset is complete */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 200f847fd8f3..2ba024b575ea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -49,6 +49,18 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
+static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* Start with X540 invariants, since they are so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ phy->ops.set_phy_power = NULL;
+
+ return 0;
+}
+
static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -320,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
@@ -331,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
/* Fallthrough */
- case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->phy.type = ixgbe_phy_ext_1g_t;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
@@ -2145,6 +2172,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+ break;
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
@@ -2215,8 +2244,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_2_5GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ case ixgbe_phy_sgmii:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ /* check different backplane modes */
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+ /* fall through */
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
*autoneg = true;
}
return 0;
@@ -2473,44 +2533,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return ixgbe_restart_an_internal_phy_x550em(hw);
}
-/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KX4 PHY.
- **/
-static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
- if (status)
- return status;
-
- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
-
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
-
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
-
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
-
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
-
- return status;
-}
-
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
@@ -2521,6 +2543,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return 0;
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -3134,7 +3159,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Set functions pointers based on phy type */
switch (hw->phy.type) {
case ixgbe_phy_x550em_kx4:
- phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.setup_link = NULL;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
@@ -3143,6 +3168,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
/* Save NW management interface connected on board. This is used
* to determine internal PHY mode
@@ -3164,10 +3195,18 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
case ixgbe_phy_fw:
phy->ops.setup_link = ixgbe_setup_fw_link;
phy->ops.reset = ixgbe_reset_phy_fw;
break;
+ case ixgbe_phy_ext_1g_t:
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = NULL;
+ phy->ops.write_reg = NULL;
+ break;
default:
break;
}
@@ -3193,6 +3232,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
@@ -3300,6 +3340,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
u32 ctrl = 0;
u32 i;
bool link_up = false;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3345,9 +3386,16 @@ mac_reset_top:
ctrl = IXGBE_CTRL_RST;
}
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_dbg(hw, "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
usleep_range(1000, 1200);
/* Poll for reset bit to self-clear meaning reset is complete */
@@ -3780,7 +3828,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = ixgbe_get_link_capabilities_X550em,
.get_bus_info = ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
@@ -3862,6 +3910,17 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
.write_reg = &ixgbe_write_phy_reg_generic,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = {
+ X550_COMMON_PHY
+ .check_overtemp = NULL,
+ .init = ixgbe_init_phy_ops_X550em,
+ .identify = ixgbe_identify_phy_x550em,
+ .read_reg = NULL,
+ .write_reg = NULL,
+ .read_reg_mdi = NULL,
+ .write_reg_mdi = NULL,
+};
+
static const struct ixgbe_phy_operations phy_ops_x550em_a = {
X550_COMMON_PHY
.check_overtemp = &ixgbe_tn_check_overtemp,
@@ -3924,6 +3983,16 @@ const struct ixgbe_info ixgbe_X550EM_x_info = {
.link_ops = &link_ops_x550em_x,
};
+const struct ixgbe_info ixgbe_x550em_x_fw_info = {
+ .mac = ixgbe_mac_X550EM_x,
+ .get_invariants = ixgbe_get_invariants_X550_x_fw,
+ .mac_ops = &mac_ops_X550EM_x,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_x_fw,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550EM_x,
+};
+
const struct ixgbe_info ixgbe_x550em_a_info = {
.mac = ixgbe_mac_x550em_a,
.get_invariants = &ixgbe_get_invariants_X550_a,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1f6c0ecd50bb..ff9d05f308ee 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -80,7 +80,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
#define IXGBEVF_QUEUE_STATS_LEN ( \
(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
- (sizeof(struct ixgbe_stats) / sizeof(u64)))
+ (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
@@ -91,18 +91,18 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-static int ixgbevf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbevf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = -1;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = -1;
hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -122,11 +122,11 @@ static int ixgbevf_get_settings(struct net_device *netdev,
break;
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
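The two ethtool_link_ksettings helpers used above replace the old u32 capability masks with a link-mode bitmap. Roughly, assuming the include/linux/ethtool.h macros of this era:

    /* ethtool_link_ksettings_zero_link_mode(cmd, supported)
     *     ~ bitmap_zero(cmd->link_modes.supported,
     *                   __ETHTOOL_LINK_MODE_MASK_NBITS);
     *
     * ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full)
     *     ~ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
     *                 cmd->link_modes.supported);
     */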
@@ -855,7 +855,8 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
if (key)
- memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
+ memcpy(key, adapter->rss_key,
+ ixgbevf_get_rxfh_key_size(netdev));
if (indir) {
int i;
@@ -885,7 +886,6 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
- .get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
@@ -905,6 +905,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
+ .get_link_ksettings = ixgbevf_get_link_ksettings,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a8cbc2dda0dd..581f44bbd7b3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -319,7 +319,7 @@ struct ixgbevf_adapter {
spinlock_t mbx_lock;
unsigned long last_reset;
- u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+ u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};
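Turning rss_key from a fixed array into a pointer is why the copy sites elsewhere in this series switch to explicit size constants (IXGBE_RSS_KEY_SIZE, ixgbevf_get_rxfh_key_size()): sizeof on the member now yields the pointer size, not the key length. A minimal illustration, with hypothetical struct names:

    struct example_array   { u32 rss_key[10]; }; /* sizeof field == 40 bytes    */
    struct example_pointer { u32 *rss_key;    }; /* sizeof field == 8 on 64-bit */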
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 80bab261a0ec..eee29bddddc1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1660,6 +1660,28 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
reg_idx);
}
+/**
+ * ixgbevf_init_rss_key - Initialize adapter RSS key
+ * @adapter: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
+{
+ u32 *rss_key;
+
+ if (!adapter->rss_key) {
+ rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
+ adapter->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1668,9 +1690,8 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
if (j == rss_i)
@@ -2611,6 +2632,12 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->mbx.ops.init_params(hw);
+ if (hw->mac.type >= ixgbe_mac_X550_vf) {
+ err = ixgbevf_init_rss_key(adapter);
+ if (err)
+ goto out;
+ }
+
/* assume legacy case in which PF would only give VF 2 queues */
hw->mac.max_tx_queues = 2;
hw->mac.max_rx_queues = 2;
@@ -4127,6 +4154,7 @@ err_register:
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->io_addr);
+ kfree(adapter->rss_key);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -4173,6 +4201,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
+ kfree(adapter->rss_key);
disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 8a5db9d7219d..b6d0c01eab10 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -333,7 +333,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
switch (hw->api_version) {
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
- if (hw->mac.type >= ixgbe_mac_X550_vf)
+ if (hw->mac.type < ixgbe_mac_X550_vf)
break;
default:
return -EOPNOTSUPP;
@@ -399,7 +399,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
switch (hw->api_version) {
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
- if (hw->mac.type >= ixgbe_mac_X550_vf)
+ if (hw->mac.type < ixgbe_mac_X550_vf)
break;
default:
return -EOPNOTSUPP;
@@ -419,7 +419,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
return -EPERM;
/* If we didn't get an ACK there must have been
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index d2555e8b947e..da6fb825afea 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -82,13 +82,13 @@ config MVNETA_BM
that all dependencies are met.
config MVPP2
- tristate "Marvell Armada 375 network interface support"
+ tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA 375 SoC.
+ Marvell ARMADA 375, 7K and 8K SoCs.
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index a0d1b084ecec..90a60b98c28e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -53,7 +53,7 @@
struct orion_mdio_dev {
struct mutex lock;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk[3];
/*
* If we have access to the error interrupt pin (which is
* somewhat misnamed as it not only reflects internal errors
@@ -187,7 +187,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
- int ret;
+ int i, ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -216,11 +216,20 @@ static int orion_mdio_probe(struct platform_device *pdev)
init_waitqueue_head(&dev->smi_busy_wait);
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(dev->clk))
- clk_prepare_enable(dev->clk);
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+ }
dev->err_interrupt = platform_get_irq(pdev, 0);
+ if (dev->err_interrupt > 0 &&
+ resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
+ dev_err(&pdev->dev,
+ "disabling interrupt, resource size is too small\n");
+ dev->err_interrupt = 0;
+ }
if (dev->err_interrupt > 0) {
ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
orion_mdio_err_irq,
@@ -251,8 +260,16 @@ static int orion_mdio_probe(struct platform_device *pdev)
return 0;
out_mdio:
- if (!IS_ERR(dev->clk))
- clk_disable_unprepare(dev->clk);
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_disable_unprepare(dev->clk[i]);
+ clk_put(dev->clk[i]);
+ }
+
return ret;
}
@@ -260,11 +277,18 @@ static int orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct orion_mdio_dev *dev = bus->priv;
+ int i;
- writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
- if (!IS_ERR(dev->clk))
- clk_disable_unprepare(dev->clk);
+
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_disable_unprepare(dev->clk[i]);
+ clk_put(dev->clk[i]);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 61dd4462411c..d297011b535d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -431,6 +431,7 @@ struct mvneta_port {
/* Flags for special SoC configurations */
bool neta_armada3700;
u16 rx_offset_correction;
+ const struct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -1106,7 +1107,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
q_map = 0;
for (queue = 0; queue < txq_number; queue++) {
struct mvneta_tx_queue *txq = &pp->txqs[queue];
- if (txq->descs != NULL)
+ if (txq->descs)
q_map |= (1 << queue);
}
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
@@ -1115,7 +1116,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
for (queue = 0; queue < rxq_number; queue++) {
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
- if (rxq->descs != NULL)
+ if (rxq->descs)
q_map |= (1 << queue);
}
mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
@@ -2849,7 +2850,7 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
&rxq->descs_phys, GFP_KERNEL);
- if (rxq->descs == NULL)
+ if (!rxq->descs)
return -ENOMEM;
rxq->last_desc = rxq->size - 1;
@@ -2919,7 +2920,7 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
&txq->descs_phys, GFP_KERNEL);
- if (txq->descs == NULL)
+ if (!txq->descs)
return -ENOMEM;
txq->last_desc = txq->size - 1;
@@ -2932,8 +2933,9 @@ static int mvneta_txq_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
- txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
- if (txq->tx_skb == NULL) {
+ txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
+ GFP_KERNEL);
+ if (!txq->tx_skb) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -2944,7 +2946,7 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
- if (txq->tso_hdrs == NULL) {
+ if (!txq->tso_hdrs) {
kfree(txq->tx_skb);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -3317,6 +3319,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
struct phy_device *phy_dev;
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
pp->phy_interface);
@@ -3325,6 +3328,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
return -ENODEV;
}
+ phy_ethtool_get_wol(phy_dev, &wol);
+ device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
@@ -3941,10 +3947,16 @@ static void mvneta_ethtool_get_wol(struct net_device *dev,
static int mvneta_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
+ int ret;
+
if (!dev->phydev)
return -EOPNOTSUPP;
- return phy_ethtool_set_wol(dev->phydev, wol);
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
+
+ return ret;
}
static const struct net_device_ops mvneta_netdev_ops = {
@@ -3991,8 +4003,7 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
/* Set port default values */
mvneta_defaults_set(pp);
- pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
- GFP_KERNEL);
+ pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
if (!pp->txqs)
return -ENOMEM;
@@ -4004,8 +4015,7 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
}
- pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
- GFP_KERNEL);
+ pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
if (!pp->rxqs)
return -ENOMEM;
@@ -4016,9 +4026,11 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
rxq->size = pp->rx_ring_size;
rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
rxq->time_coal = MVNETA_RX_COAL_USEC;
- rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent,
- rxq->size * sizeof(void *),
- GFP_KERNEL);
+ rxq->buf_virt_addr
+ = devm_kmalloc_array(pp->dev->dev.parent,
+ rxq->size,
+ sizeof(*rxq->buf_virt_addr),
+ GFP_KERNEL);
if (!rxq->buf_virt_addr)
return -ENOMEM;
}
@@ -4098,6 +4110,8 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
ctrl |= MVNETA_GMAC2_PORT_RGMII;
break;
default:
@@ -4118,7 +4132,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
- const struct mbus_dram_target_info *dram_target_info;
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
@@ -4267,13 +4280,13 @@ static int mvneta_probe(struct platform_device *pdev)
pp->tx_csum_limit = tx_csum_limit;
- dram_target_info = mv_mbus_dram_info();
+ pp->dram_target_info = mv_mbus_dram_info();
/* Armada3700 requires setting default configuration of Mbus
* windows, though without using a filled mbus_dram_target_info
* structure.
*/
- if (dram_target_info || pp->neta_armada3700)
- mvneta_conf_mbus_windows(pp, dram_target_info);
+ if (pp->dram_target_info || pp->neta_armada3700)
+ mvneta_conf_mbus_windows(pp, pp->dram_target_info);
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -4405,6 +4418,61 @@ static int mvneta_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mvneta_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ mvneta_stop(dev);
+ netif_device_detach(dev);
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
+ return 0;
+}
+
+static int mvneta_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct net_device *dev = dev_get_drvdata(device);
+ struct mvneta_port *pp = netdev_priv(dev);
+ int err;
+
+ clk_prepare_enable(pp->clk);
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+ if (pp->dram_target_info || pp->neta_armada3700)
+ mvneta_conf_mbus_windows(pp, pp->dram_target_info);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev, "use SW buffer management\n");
+ pp->bm_priv = NULL;
+ }
+ }
+ mvneta_defaults_set(pp);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
+ if (err < 0) {
+ dev_err(device, "can't power up port\n");
+ return err;
+ }
+
+ if (pp->use_inband_status)
+ mvneta_fixed_link_update(pp, dev->phydev);
+
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ mvneta_open(dev);
+ mvneta_set_rx_mode(dev);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
+
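SIMPLE_DEV_PM_OPS() wires the two callbacks above into a struct dev_pm_ops covering every system-sleep transition; it expands to roughly the following (see include/linux/pm.h — all hooks compile away when CONFIG_PM_SLEEP is unset, which is why the functions above are guarded):

    static const struct dev_pm_ops example_mvneta_pm_ops = {
            .suspend  = mvneta_suspend,
            .resume   = mvneta_resume,
            .freeze   = mvneta_suspend,
            .thaw     = mvneta_resume,
            .poweroff = mvneta_suspend,
            .restore  = mvneta_resume,
    };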
static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
@@ -4419,6 +4487,7 @@ static struct platform_driver mvneta_driver = {
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
+ .pm = &mvneta_pm_ops,
},
};
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d00421b9ffea..9b875d776b29 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -25,6 +25,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
@@ -49,9 +50,11 @@
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
-#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
-#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
@@ -99,6 +102,7 @@
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
@@ -117,9 +121,6 @@
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
-#define MVPP2_TXQ_THRESH_REG 0x2094
-#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
-#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
@@ -140,6 +141,7 @@
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
@@ -152,10 +154,52 @@
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
-#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
+
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
@@ -210,14 +254,19 @@
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
+#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
-#define MVPP2_BM_MC_RLS_REG 0x64c4
-#define MVPP2_BM_MC_ID_MASK 0xfff
-#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
+#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
+#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
@@ -287,6 +336,24 @@
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_CTRL_4_REG 0x90
+#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
+#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
+#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
+#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+
+/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL3_REG 0x11c
+#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+
+/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+#define MVPP22_SMI_MISC_CFG_REG 0x1204
+#define MVPP22_SMI_POLLING_EN BIT(10)
+
+#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
@@ -335,15 +402,9 @@
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
-/* Maximum number of RXQs used by single port */
-#define MVPP2_MAX_RXQ 8
-
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4
-/* Total number of RXQs available to all ports */
-#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
-
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128
@@ -615,6 +676,11 @@ enum mvpp2_prs_l3_cast {
*/
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
+#define MVPP21_ADDR_SPACE_SZ 0
+#define MVPP22_ADDR_SPACE_SZ SZ_64K
+
+#define MVPP2_MAX_CPUS 4
+
enum mvpp2_bm_type {
MVPP2_BM_FREE,
MVPP2_BM_SWF_LONG,
@@ -626,12 +692,19 @@ enum mvpp2_bm_type {
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
- void __iomem *base;
void __iomem *lms_base;
+ void __iomem *iface_base;
+
+ /* On PPv2.2, each CPU can access the base register through a
+ * separate address space, each 64 KB apart from the
+ * others.
+ */
+ void __iomem *cpu_base[MVPP2_MAX_CPUS];
/* Common clocks */
struct clk *pp_clk;
struct clk *gop_clk;
+ struct clk *mg_clk;
/* List of pointers to port structures */
struct mvpp2_port **port_list;
@@ -649,6 +722,12 @@ struct mvpp2 {
/* Tclk value */
u32 tclk;
+
+ /* HW version */
+ enum { MVPP21, MVPP22 } hw_version;
+
+ /* Maximum number of RXQs per port */
+ unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
@@ -670,6 +749,11 @@ struct mvpp2_port_pcpu {
struct mvpp2_port {
u8 id;
+ /* Index of the port from the "group of ports" complex point
+ * of view
+ */
+ int gop_id;
+
int irq;
struct mvpp2 *priv;
@@ -741,22 +825,24 @@ struct mvpp2_port {
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
-struct mvpp2_tx_desc {
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* physical addr of transmitted buffer */
+ u32 buf_dma_addr; /* physical addr of transmitted buffer */
u32 buf_cookie; /* cookie for access to TX buffer in tx path */
u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
u32 reserved2; /* reserved (for future use) */
};
-struct mvpp2_rx_desc {
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
u32 status; /* info about received packet */
u16 reserved1; /* parser_info (for future use, PnC) */
u16 data_size; /* size of received packet in bytes */
- u32 buf_phys_addr; /* physical address of the buffer */
+ u32 buf_dma_addr; /* physical address of the buffer */
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved2; /* gem_port_id (for future use, PON) */
u16 reserved3; /* csum_l4 (for future use, PnC) */
@@ -767,12 +853,51 @@ struct mvpp2_rx_desc {
u32 reserved8;
};
+/* HW TX descriptor for PPv2.2 */
+struct mvpp22_tx_desc {
+ u32 command;
+ u8 packet_offset;
+ u8 phys_txq;
+ u16 data_size;
+ u64 reserved1;
+ u64 buf_dma_addr_ptp;
+ u64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 */
+struct mvpp22_rx_desc {
+ u32 status;
+ u16 reserved1;
+ u16 data_size;
+ u32 reserved2;
+ u32 reserved3;
+ u64 buf_dma_addr_key_hash;
+ u64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+ union {
+ struct mvpp21_tx_desc pp21;
+ struct mvpp22_tx_desc pp22;
+ };
+};
+
+struct mvpp2_rx_desc {
+ union {
+ struct mvpp21_rx_desc pp21;
+ struct mvpp22_rx_desc pp22;
+ };
+};
+
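With the descriptor union made opaque, callers never touch the pp21/pp22 members directly; they go through the accessor helpers introduced further down. An illustrative TX-fill fragment (the function name is hypothetical; the command flags are the driver's first/last-fragment bits):

    static void example_fill_tx_desc(struct mvpp2_port *port,
                                     struct mvpp2_tx_desc *desc,
                                     unsigned int txq, size_t len,
                                     dma_addr_t buf)
    {
            mvpp2_txdesc_txq_set(port, desc, txq);
            mvpp2_txdesc_size_set(port, desc, len);
            mvpp2_txdesc_offset_set(port, desc, 0); /* data at buffer head */
            mvpp2_txdesc_dma_addr_set(port, desc, buf);
            mvpp2_txdesc_cmd_set(port, desc,
                                 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
    }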
struct mvpp2_txq_pcpu_buf {
/* Transmitted SKB */
struct sk_buff *skb;
/* Physical address of transmitted buffer */
- dma_addr_t phys;
+ dma_addr_t dma;
/* Size transmitted */
size_t size;
@@ -825,7 +950,7 @@ struct mvpp2_tx_queue {
struct mvpp2_tx_desc *descs;
/* DMA address of the Tx DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last Tx DMA descriptor */
int last_desc;
@@ -848,7 +973,7 @@ struct mvpp2_rx_queue {
struct mvpp2_rx_desc *descs;
/* DMA address of the RX DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last RX DMA descriptor */
int last_desc;
@@ -912,6 +1037,8 @@ struct mvpp2_bm_pool {
/* Buffer Pointers Pool External (BPPE) size */
int size;
+ /* BPPE size in bytes */
+ int size_bytes;
/* Number of buffers for this pool */
int buf_num;
/* Pool buffer size */
@@ -922,29 +1049,13 @@ struct mvpp2_bm_pool {
/* BPPE virtual base address */
u32 *virt_addr;
- /* BPPE physical base address */
- dma_addr_t phys_addr;
+ /* BPPE DMA base address */
+ dma_addr_t dma_addr;
/* Ports using BM pool */
u32 port_map;
};
-struct mvpp2_buff_hdr {
- u32 next_buff_phys_addr;
- u32 next_buff_virt_addr;
- u16 byte_count;
- u16 info;
- u8 reserved1; /* bm_qset (for future use, BM) */
-};
-
-/* Buffer header info bits */
-#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
-#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
-#define MVPP2_B_HDR_INFO_LAST_OFFS 12
-#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
-#define MVPP2_B_HDR_INFO_IS_LAST(info) \
- ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
-
/* Static declarations */
/* Number of RXQs used by single port */
@@ -959,12 +1070,177 @@ static int txq_number = MVPP2_MAX_TXQ;
static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
- writel(data, priv->base + offset);
+ writel(data, priv->cpu_base[0] + offset);
}
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
- return readl(priv->base + offset);
+ return readl(priv->cpu_base[0] + offset);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-CPU registers, where each CPU has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP2_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific CPU
+ * window, because they are related to an access to a per-CPU
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ */
+static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+ u32 offset, u32 data)
+{
+ writel(data, priv->cpu_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+ u32 offset)
+{
+ return readl(priv->cpu_base[cpu] + offset);
+}
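To see why the cpu argument matters, model the indirect-access protocol the comment above describes: a queue number is first written to a *_NUM_REG, then the related registers are accessed through the same per-CPU window. A standalone sketch with made-up register indices (not the driver's offsets):

#include <stdint.h>
#include <stdio.h>

#define NCPUS    4
#define NUM_REG  0	/* stands in for MVPP2_TXQ_NUM_REG */
#define DATA_REG 1	/* stands in for a register related to NUM_REG */

static uint32_t window[NCPUS][2];	/* one register window per CPU */

static void percpu_write(int cpu, int reg, uint32_t v) { window[cpu][reg] = v; }
static uint32_t percpu_read(int cpu, int reg) { return window[cpu][reg]; }

int main(void)
{
	/* two CPUs select different queues; neither clobbers the other,
	 * as long as the select and the access use the same window */
	percpu_write(0, NUM_REG, 3);
	percpu_write(1, NUM_REG, 5);
	printf("%u %u\n", percpu_read(0, NUM_REG), percpu_read(1, NUM_REG));
	return 0;
}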
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.buf_dma_addr;
+ else
+ return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ dma_addr_t dma_addr)
+{
+ if (port->priv->hw_version == MVPP21) {
+ tx_desc->pp21.buf_dma_addr = dma_addr;
+ } else {
+ u64 val = (u64)dma_addr;
+
+ tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+ tx_desc->pp22.buf_dma_addr_ptp |= val;
+ }
+}
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.data_size;
+ else
+ return tx_desc->pp22.data_size;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ size_t size)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.data_size = size;
+ else
+ tx_desc->pp22.data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int txq)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.phys_txq = txq;
+ else
+ tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int command)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.command = command;
+ else
+ tx_desc->pp22.command = command;
+}
+
+static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int offset)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.packet_offset = offset;
+ else
+ tx_desc->pp22.packet_offset = offset;
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.packet_offset;
+ else
+ return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_dma_addr;
+ else
+ return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_cookie;
+ else
+ return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.data_size;
+ else
+ return rx_desc->pp22.data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.status;
+ else
+ return rx_desc->pp22.status;
}
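All the PPv2.2 accessors above share one piece of arithmetic: the 41-bit DMA address sits in the low bits of a 64-bit descriptor word, with unrelated fields (PTP info, key hash, misc cookie bits) packed above it. A standalone sketch of the mask-and-merge, using a local stand-in for the kernel's GENMASK_ULL:

#include <stdint.h>
#include <stdio.h>

/* local equivalent of the kernel's GENMASK_ULL(h, l) */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t field = 0xabcULL << 52;	/* pretend PTP bits live up high */
	uint64_t dma   = 0x1234567890ULL;	/* fits in 41 bits */

	/* set: clear the low 41 bits, then OR in the new address */
	field &= ~GENMASK_ULL(40, 0);
	field |= dma;

	/* get: mask away everything above bit 40 */
	printf("dma=0x%llx high=0x%llx\n",
	       (unsigned long long)(field & GENMASK_ULL(40, 0)),
	       (unsigned long long)(field >> 52));
	return 0;
}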
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -974,15 +1250,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
txq_pcpu->txq_get_index = 0;
}
-static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc)
{
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_put_index;
tx_buf->skb = skb;
- tx_buf->size = tx_desc->data_size;
- tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+ tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+ tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+ mvpp2_txdesc_offset_get(port, tx_desc);
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -1411,7 +1689,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
pe.index = MVPP2_PE_DROP_ALL;
@@ -1448,7 +1726,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
pe.index = MVPP2_PE_MAC_PROMISCUOUS;
@@ -1494,7 +1772,7 @@ static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
pe.index = index;
@@ -1546,7 +1824,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = tid;
@@ -1609,7 +1887,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = tid;
@@ -1743,10 +2021,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
if (tid <= tid_aux) {
ret = -EINVAL;
- goto error;
+ goto free_pe;
}
- memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
+ memset(pe, 0, sizeof(*pe));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
pe->index = tid;
@@ -1775,8 +2053,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
-
-error:
+free_pe:
kfree(pe);
return ret;
@@ -1861,7 +2138,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
if (ai < 0) {
ret = ai;
- goto error;
+ goto free_pe;
}
/* Get first single/triple vlan tid */
@@ -1884,10 +2161,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
if (tid >= tid_aux) {
ret = -ERANGE;
- goto error;
+ goto free_pe;
}
- memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(pe, 0, sizeof(*pe));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
pe->index = tid;
@@ -1911,8 +2188,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
/* Update ports' mask */
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
-
-error:
+free_pe:
kfree(pe);
return ret;
}
@@ -1934,7 +2210,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
@@ -1992,7 +2268,7 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
@@ -2048,7 +2324,7 @@ static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = tid;
@@ -2087,7 +2363,7 @@ static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = tid;
@@ -2147,7 +2423,7 @@ static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
int port;
for (port = 0; port < MVPP2_MAX_PORTS; port++) {
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
@@ -2169,7 +2445,7 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
pe.index = MVPP2_PE_MH_DEFAULT;
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
@@ -2192,7 +2468,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
/* Non-promiscuous mode for all ports - DROP unknown packets */
pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
@@ -2253,7 +2529,7 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
/* Set default entry, in case DSA or EDSA tag not found */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = MVPP2_PE_DSA_DEFAULT;
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
@@ -2283,7 +2559,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2309,7 +2585,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2339,7 +2615,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2373,7 +2649,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2438,7 +2714,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2535,7 +2811,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
return err;
/* Set default double vlan entry */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
pe.index = MVPP2_PE_VLAN_DBL;
@@ -2555,7 +2831,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Set default vlan none entry */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
pe.index = MVPP2_PE_VLAN_NONE;
@@ -2585,7 +2861,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
@@ -2635,7 +2911,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
@@ -2662,7 +2938,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
@@ -2720,7 +2996,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
return err;
/* Default IPv4 entry for unknown protocols */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
@@ -2745,7 +3021,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Default IPv4 entry for unicast address */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
@@ -2813,7 +3089,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = tid;
@@ -2834,7 +3110,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Default IPv6 entry for unknown protocols */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = MVPP2_PE_IP6_PROTO_UN;
@@ -2927,7 +3203,7 @@ static int mvpp2_prs_default_init(struct platform_device *pdev,
mvpp2_prs_hw_inv(priv, index);
priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
- sizeof(struct mvpp2_prs_shadow),
+ sizeof(*priv->prs_shadow),
GFP_KERNEL);
if (!priv->prs_shadow)
return -ENOMEM;
@@ -3378,27 +3654,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool, int size)
{
- int size_bytes;
u32 val;
- size_bytes = sizeof(u32) * size;
- bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
- &bm_pool->phys_addr,
+ /* Number of buffer pointers must be a multiple of 16, as per
+ * hardware constraints
+ */
+ if (!IS_ALIGNED(size, 16))
+ return -EINVAL;
+
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ * bytes per buffer pointer
+ */
+ if (priv->hw_version == MVPP21)
+ bm_pool->size_bytes = 2 * sizeof(u32) * size;
+ else
+ bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+ &bm_pool->dma_addr,
GFP_KERNEL);
if (!bm_pool->virt_addr)
return -ENOMEM;
if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
- bm_pool->phys_addr);
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ bm_pool->virt_addr, bm_pool->dma_addr);
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
- bm_pool->phys_addr);
+ lower_32_bits(bm_pool->dma_addr));
mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
@@ -3426,6 +3714,34 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *dma_addr,
+ phys_addr_t *phys_addr)
+{
+ int cpu = smp_processor_id();
+
+ *dma_addr = mvpp2_percpu_read(priv, cpu,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+
+ if (priv->hw_version == MVPP22) {
+ u32 val;
+ u32 dma_addr_highbits, phys_addr_highbits;
+
+ val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+ dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+ phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+ MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+ if (sizeof(dma_addr_t) == 8)
+ *dma_addr |= (u64)dma_addr_highbits << 32;
+
+ if (sizeof(phys_addr_t) == 8)
+ *phys_addr |= (u64)phys_addr_highbits << 32;
+ }
+}
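The merge above can be modelled in isolation. Assuming the MVPP22_BM_ADDR_HIGH_* masks (defined elsewhere in the file) put DMA address bits [39:32] in the low byte of the high register and physical-address bits [39:32] in the byte above it, the reconstruction is:

#include <stdint.h>
#include <stdio.h>

#define ADDR_HIGH_PHYS_MASK	0x000000ffu	/* assumed layout */
#define ADDR_HIGH_VIRT_MASK	0x0000ff00u
#define ADDR_HIGH_VIRT_SHIFT	8

int main(void)
{
	uint32_t low_dma  = 0x89abcdef;	/* MVPP2_BM_PHY_ALLOC_REG */
	uint32_t low_phys = 0x01234567;	/* MVPP2_BM_VIRT_ALLOC_REG */
	uint32_t high     = 0x00003412;	/* MVPP22_BM_ADDR_HIGH_ALLOC */

	uint64_t dma  = low_dma |
		((uint64_t)(high & ADDR_HIGH_PHYS_MASK) << 32);
	uint64_t phys = low_phys |
		((uint64_t)((high & ADDR_HIGH_VIRT_MASK) >>
			    ADDR_HIGH_VIRT_SHIFT) << 32);

	/* prints dma=0x1289abcdef phys=0x3401234567 */
	printf("dma=0x%llx phys=0x%llx\n",
	       (unsigned long long)dma, (unsigned long long)phys);
	return 0;
}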
+
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
@@ -3433,21 +3749,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
int i;
for (i = 0; i < bm_pool->buf_num; i++) {
- dma_addr_t buf_phys_addr;
- unsigned long vaddr;
+ dma_addr_t buf_dma_addr;
+ phys_addr_t buf_phys_addr;
+ void *data;
- /* Get buffer virtual address (indirect access) */
- buf_phys_addr = mvpp2_read(priv,
- MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+ mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+ &buf_dma_addr, &buf_phys_addr);
- dma_unmap_single(dev, buf_phys_addr,
+ dma_unmap_single(dev, buf_dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE);
- if (!vaddr)
+ data = (void *)phys_to_virt(buf_phys_addr);
+ if (!data)
break;
- mvpp2_frag_free(bm_pool, (void *)vaddr);
+ mvpp2_frag_free(bm_pool, data);
}
/* Update BM driver with number of buffers removed from pool */
@@ -3471,9 +3787,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
- dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr,
- bm_pool->phys_addr);
+ bm_pool->dma_addr);
return 0;
}
@@ -3515,7 +3831,7 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
/* Allocate and initialize BM pools */
priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
- sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
+ sizeof(*priv->bm_pools), GFP_KERNEL);
if (!priv->bm_pools)
return -ENOMEM;
@@ -3529,17 +3845,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
{
- u32 val;
+ u32 val, mask;
int prxq;
/* Get queue physical ID */
prxq = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_POOL_LONG_MASK;
- val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
- MVPP2_RXQ_POOL_LONG_MASK);
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_LONG_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_LONG_MASK;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
@@ -3547,40 +3866,45 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
int lrxq, int short_pool)
{
- u32 val;
+ u32 val, mask;
int prxq;
/* Get queue physical ID */
prxq = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
- val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
- MVPP2_RXQ_POOL_SHORT_MASK);
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_SHORT_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_SHORT_MASK;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *buf_phys_addr,
+ dma_addr_t *buf_dma_addr,
+ phys_addr_t *buf_phys_addr,
gfp_t gfp_mask)
{
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
void *data;
data = mvpp2_frag_alloc(bm_pool);
if (!data)
return NULL;
- phys_addr = dma_map_single(port->dev->dev.parent, data,
- MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+ dma_addr = dma_map_single(port->dev->dev.parent, data,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
mvpp2_frag_free(bm_pool, data);
return NULL;
}
- *buf_phys_addr = phys_addr;
+ *buf_dma_addr = dma_addr;
+ *buf_phys_addr = virt_to_phys(data);
return data;
}
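Note that the function now hands back two addresses for one buffer: the streaming DMA address the controller will use, and the CPU physical address that gets stored as the BM cookie. Storing the physical address works because the buffers come from the kernel linear map, where phys_to_virt() inverts virt_to_phys(). A toy standalone model of that round trip (the offset and addresses are made up):

#include <stdint.h>
#include <stdio.h>

#define TOY_PHYS_OFFSET 0x40000000UL	/* stand-in linear-map offset */

static uintptr_t toy_virt_to_phys(void *v)
{
	return (uintptr_t)v - TOY_PHYS_OFFSET;
}

static void *toy_phys_to_virt(uintptr_t p)
{
	return (void *)(p + TOY_PHYS_OFFSET);
}

int main(void)
{
	void *buf = (void *)0x40123000UL;	/* pretend linear-map address */
	uintptr_t cookie = toy_virt_to_phys(buf);	/* what goes to the BM */

	printf("round trip ok: %d\n", toy_phys_to_virt(cookie) == buf);
	return 0;
}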
@@ -3604,37 +3928,46 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_phys_addr,
- unsigned long buf_virt_addr)
+ dma_addr_t buf_dma_addr,
+ phys_addr_t buf_phys_addr)
{
- mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
- mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
-}
+ int cpu = smp_processor_id();
-/* Release multicast buffer */
-static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_phys_addr,
- unsigned long buf_virt_addr,
- int mc_id)
-{
- u32 val = 0;
+ if (port->priv->hw_version == MVPP22) {
+ u32 val = 0;
+
+ if (sizeof(dma_addr_t) == 8)
+ val |= upper_32_bits(buf_dma_addr) &
+ MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
- val |= (mc_id & MVPP2_BM_MC_ID_MASK);
- mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+ if (sizeof(phys_addr_t) == 8)
+ val |= (upper_32_bits(buf_phys_addr)
+ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+ MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_bm_pool_put(port, pool,
- buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
- buf_virt_addr);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+ }
+
+ /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
+ * returned in the "cookie" field of the RX
+ * descriptor. Instead of storing the virtual address, we
+ * store the physical address
+ */
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
- dma_addr_t phys_addr,
- unsigned long cookie)
+ dma_addr_t dma_addr,
+ phys_addr_t phys_addr)
{
int pool = mvpp2_bm_cookie_pool_get(bm);
- mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
@@ -3642,7 +3975,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
{
int i, buf_size, total_size;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
void *buf;
buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
@@ -3657,12 +3991,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
}
for (i = 0; i < buf_num; i++) {
- buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+ &phys_addr, GFP_KERNEL);
if (!buf)
break;
- mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
- (unsigned long)buf);
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+ phys_addr);
}
/* Update BM driver with number of buffers added to pool */
@@ -3830,7 +4165,8 @@ static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
/* Unmask the current CPU's Rx/Tx interrupts */
@@ -3838,17 +4174,46 @@ static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
- (MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id),
+ (MVPP2_CAUSE_MISC_SUM_MASK |
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
/* Port configuration routines */
+static void mvpp22_port_mii_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+ writel(val, port->base + MVPP22_XLG_CTRL3_REG);
+ }
+
+ val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+ if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ else
+ val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ val |= MVPP22_CTRL4_SYNC_BYPASS;
+ val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+}
+
static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->priv->hw_version == MVPP22)
+ mvpp22_port_mii_set(port);
+
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
switch (port->phy_interface) {
@@ -3952,16 +4317,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
{
int tx_port_num, val, queue, ptxq, lrxq;
- /* Configure port to loopback if needed */
- if (port->flags & MVPP2_F_LOOPBACK)
- mvpp2_port_loopback_set(port);
+ if (port->priv->hw_version == MVPP21) {
+ /* Configure port to loopback if needed */
+ if (port->flags & MVPP2_F_LOOPBACK)
+ mvpp2_port_loopback_set(port);
- /* Update TX FIFO MIN Threshold */
- val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
- /* Min. TX threshold must be less than minimal packet length */
- val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
- writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ /* Min. TX threshold must be less than minimal packet length */
+ val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ }
/* Disable Legacy WRR, Disable EJP, Release from reset */
tx_port_num = mvpp2_egress_port(port);
@@ -4048,7 +4415,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port)
for (queue = 0; queue < txq_number; queue++) {
struct mvpp2_tx_queue *txq = port->txqs[queue];
- if (txq->descs != NULL)
+ if (txq->descs)
qmap |= (1 << queue);
}
@@ -4149,11 +4516,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
}
/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
{
- int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
int cpu = smp_processor_id();
+ int pool;
+
+ pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
+ MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
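The cookie is nothing more than two byte-wide fields packed into a u32. A standalone round-trip sketch; the offsets are placeholders for the real MVPP2_BM_COOKIE_*_OFFS constants defined earlier in the file:

#include <stdint.h>
#include <stdio.h>

#define COOKIE_POOL_OFFS 8	/* placeholder offsets */
#define COOKIE_CPU_OFFS  24

static uint32_t cookie_build(int pool, int cpu)
{
	return ((pool & 0xFF) << COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << COOKIE_CPU_OFFS);
}

static int cookie_pool_get(uint32_t cookie)
{
	return (cookie >> COOKIE_POOL_OFFS) & 0xFF;
}

int main(void)
{
	uint32_t bm = cookie_build(3, 1);
	printf("pool=%d\n", cookie_pool_get(bm));	/* pool=3 */
	return 0;
}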
@@ -4161,18 +4532,6 @@ static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
/* Tx descriptors helper methods */
-/* Get number of Tx descriptors waiting to be transmitted by HW */
-static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- u32 val;
-
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
-
- return val & MVPP2_TXQ_PENDING_MASK;
-}
-
/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
@@ -4187,7 +4546,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -4216,11 +4576,12 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
struct mvpp2_tx_queue *txq, int num)
{
u32 val;
+ int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+ mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+ val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
@@ -4321,7 +4682,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
u32 val;
/* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+ val = mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -4335,7 +4697,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
for (queue = 0; queue < txq_number; queue++) {
int id = port->txqs[queue]->id;
- mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+ mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(id));
}
}
@@ -4394,12 +4757,14 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ int cpu = smp_processor_id();
+
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
- rxq->pkts_coal);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4449,7 +4814,7 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@@ -4527,10 +4892,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
int desc_num, int cpu,
struct mvpp2 *priv)
{
+ u32 txq_dma;
+
/* Allocate memory for TX descriptors */
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
desc_num * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_phys, GFP_KERNEL);
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
@@ -4540,10 +4907,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
aggr_txq->next_desc_to_proc = mvpp2_read(priv,
MVPP2_AGGR_TXQ_INDEX_REG(cpu));
- /* Set Tx descriptors queue starting address */
- /* indirect access */
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
- aggr_txq->descs_phys);
+	/* Set Tx descriptors queue starting address (indirect
+	 * access)
+	 */
+ if (priv->hw_version == MVPP21)
+ txq_dma = aggr_txq->descs_dma;
+ else
+ txq_dma = aggr_txq->descs_dma >>
+ MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
return 0;
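On PPv2.2 the descriptor-base register stores a pre-shifted address, so a ring that lives above 4 GB still fits in a single 32-bit write provided the ring base is sufficiently aligned. A sketch of the arithmetic, with the shift width assumed for illustration (the driver uses MVPP22_AGGR_TXQ_DESC_ADDR_OFFS):

#include <stdint.h>
#include <stdio.h>

#define DESC_ADDR_OFFS 8	/* assumed shift width */

int main(void)
{
	uint64_t descs_dma = 0x0000001234567800ULL;	/* 256-byte aligned base */
	uint32_t txq_dma = (uint32_t)(descs_dma >> DESC_ADDR_OFFS);

	printf("reg=0x%x\n", txq_dma);	/* 0x12345678 */
	return 0;
}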
@@ -4554,12 +4927,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ u32 rxq_dma;
+ int cpu;
+
rxq->size = port->rx_ring_size;
/* Allocate memory for RX descriptors */
rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
- &rxq->descs_phys, GFP_KERNEL);
+ &rxq->descs_dma, GFP_KERNEL);
if (!rxq->descs)
return -ENOMEM;
@@ -4569,10 +4945,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
- mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ if (port->priv->hw_version == MVPP21)
+ rxq_dma = rxq->descs_dma;
+ else
+ rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
/* Set Offset */
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4599,10 +4980,11 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
for (i = 0; i < rx_received; i++) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- u32 bm = mvpp2_bm_cookie_build(rx_desc);
+ u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm,
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+ mvpp2_rxdesc_cookie_get(port, rx_desc));
}
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
@@ -4611,26 +4993,29 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ int cpu;
+
mvpp2_rxq_drop_pkts(port, rxq);
if (rxq->descs)
dma_free_coherent(port->dev->dev.parent,
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
rxq->descs,
- rxq->descs_phys);
+ rxq->descs_dma);
rxq->descs = NULL;
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
- rxq->descs_phys = 0;
+ rxq->descs_dma = 0;
/* Clear Rx descriptors queue starting address and size;
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
@@ -4646,23 +5031,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
/* Allocate memory for Tx descriptors */
txq->descs = dma_alloc_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- &txq->descs_phys, GFP_KERNEL);
+ &txq->descs_dma, GFP_KERNEL);
if (!txq->descs)
return -ENOMEM;
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
- MVPP2_TXQ_DESC_SIZE_MASK);
- mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
- mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
- txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+ txq->descs_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+ txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+ txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
val &= ~MVPP2_TXQ_PENDING_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors
* for each existing TXQ.
@@ -4673,9 +5060,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
(txq->log_id * desc_per_txq);
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
- MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
- MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+ MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
/* WRR / EJP configuration - indirect access */
tx_port_num = mvpp2_egress_port(port);
@@ -4694,11 +5081,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
txq_pcpu->size = txq->size;
- txq_pcpu->buffs = kmalloc(txq_pcpu->size *
- sizeof(struct mvpp2_txq_pcpu_buf),
- GFP_KERNEL);
+ txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
+ sizeof(*txq_pcpu->buffs),
+ GFP_KERNEL);
if (!txq_pcpu->buffs)
- goto error;
+ goto cleanup;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4707,8 +5094,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
-
-error:
+cleanup:
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->buffs);
@@ -4716,7 +5102,7 @@ error:
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ txq->descs, txq->descs_dma);
return -ENOMEM;
}
@@ -4736,20 +5122,21 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
if (txq->descs)
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ txq->descs, txq->descs_dma);
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
- txq->descs_phys = 0;
+ txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
@@ -4759,10 +5146,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
int delay, pending, cpu;
u32 val;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
val |= MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
@@ -4778,11 +5166,13 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
mdelay(1);
delay++;
- pending = mvpp2_txq_pend_desc_num_get(port, txq);
+ pending = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_TXQ_PENDING_REG);
+ pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -4991,20 +5381,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
static void mvpp2_rx_error(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- u32 status = rx_desc->status;
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
case MVPP2_RXD_ERR_CRC:
- netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_OVERRUN:
- netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_RESOURCE:
- netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
+ status, sz);
break;
}
}
@@ -5031,15 +5422,17 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, u32 bm)
{
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
void *buf;
/* No recycle or too many buffers are in use, so allocate a new skb */
- buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+ GFP_ATOMIC);
if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
+ mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
return 0;
}
@@ -5075,43 +5468,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
-static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- struct mvpp2_buff_hdr *buff_hdr;
- struct sk_buff *skb;
- u32 rx_status = rx_desc->status;
- dma_addr_t buff_phys_addr;
- unsigned long buff_virt_addr;
- dma_addr_t buff_phys_addr_next;
- unsigned long buff_virt_addr_next;
- int mc_id;
- int pool_id;
-
- pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
- buff_phys_addr = rx_desc->buf_phys_addr;
- buff_virt_addr = rx_desc->buf_cookie;
-
- do {
- skb = (struct sk_buff *)buff_virt_addr;
- buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
-
- mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
-
- buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
- buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
-
- /* Release buffer */
- mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
- buff_virt_addr, mc_id);
-
- buff_phys_addr = buff_phys_addr_next;
- buff_virt_addr = buff_virt_addr_next;
-
- } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
-}
-
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_queue *rxq)
@@ -5132,25 +5488,23 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
unsigned int frag_size;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
void *data;
rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
- phys_addr = rx_desc->buf_phys_addr;
- data = (void *)(uintptr_t)rx_desc->buf_cookie;
-
- bm = mvpp2_bm_cookie_build(rx_desc);
+ rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+ rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+ rx_bytes -= MVPP2_MH_SIZE;
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+
+ bm = mvpp2_bm_cookie_build(port, rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
bm_pool = &port->priv->bm_pools[pool];
- /* Check if buffer header is used */
- if (rx_status & MVPP2_RXD_BUF_HDR) {
- mvpp2_buff_hdr_rx(port, rx_desc);
- continue;
- }
/* In case of an error, release the requested buffer pointer
* to the Buffer Manager. This request process is controlled
@@ -5158,13 +5512,11 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
* comprised by the RX descriptor.
*/
if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
- err_drop_frame:
+err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
-
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
continue;
}
@@ -5185,7 +5537,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, phys_addr,
+ dma_unmap_single(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE);
rcvd_pkts++;
@@ -5216,11 +5568,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
}
static inline void
-tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
- dma_unmap_single(dev, desc->buf_phys_addr,
- desc->data_size, DMA_TO_DEVICE);
+ dma_addr_t buf_dma_addr =
+ mvpp2_txdesc_dma_addr_get(port, desc);
+ size_t buf_sz =
+ mvpp2_txdesc_size_get(port, desc);
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@@ -5232,47 +5588,49 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
struct mvpp2_tx_desc *tx_desc;
int i;
- dma_addr_t buf_phys_addr;
+ dma_addr_t buf_dma_addr;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = frag->size;
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, frag->size);
- buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
- tx_desc->data_size,
+ buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+ frag->size,
DMA_TO_DEVICE);
- if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+ if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
- goto error;
+ goto cleanup;
}
- tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
- tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc,
+ MVPP2_TXD_L_DESC);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
- tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
}
return 0;
-
-error:
+cleanup:
/* Release all descriptors that were used to map fragments of
* this packet, as well as the corresponding DMA mappings
*/
for (i = i - 1; i >= 0; i--) {
tx_desc = txq->descs + i;
- tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+ tx_desc_unmap_put(port, txq, tx_desc);
}
return -ENOMEM;
@@ -5285,7 +5643,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
struct mvpp2_tx_queue *txq, *aggr_txq;
struct mvpp2_txq_pcpu *txq_pcpu;
struct mvpp2_tx_desc *tx_desc;
- dma_addr_t buf_phys_addr;
+ dma_addr_t buf_dma_addr;
int frags = 0;
u16 txq_id;
u32 tx_cmd;
@@ -5307,35 +5665,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = skb_headlen(skb);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
- buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
- tx_desc->data_size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+ buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
mvpp2_txq_desc_put(txq);
frags = 0;
goto out;
}
- tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
tx_cmd = mvpp2_skb_tx_csum(port, skb);
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
- tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
- tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
- tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+ tx_desc_unmap_put(port, txq, tx_desc);
frags = 0;
goto out;
}
@@ -5396,6 +5757,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx, cause_rx, cause_misc;
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
+ int cpu = smp_processor_id();
/* Rx/Tx cause register
*
@@ -5407,8 +5769,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
*
* Each CPU has its own Rx/Tx cause register
*/
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+ cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
@@ -5417,8 +5779,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Clear the cause register */
mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
- cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+ cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
@@ -5530,7 +5893,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
return 0;
}
-static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
u32 mac_addr_l, mac_addr_m, mac_addr_h;
@@ -5698,7 +6061,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data)) {
err = -EADDRNOTAVAIL;
- goto error;
+ goto log_error;
}
if (!netif_running(dev)) {
@@ -5708,7 +6071,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
/* Reconfigure parser to accept the original MAC address */
err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
if (err)
- goto error;
+ goto log_error;
}
mvpp2_stop_dev(port);
@@ -5720,15 +6083,14 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
/* Reconfigure parser accept the original MAC address */
err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
if (err)
- goto error;
+ goto log_error;
out_start:
mvpp2_start_dev(port);
mvpp2_egress_enable(port);
mvpp2_ingress_enable(port);
return 0;
-
-error:
- netdev_err(dev, "fail to change MAC address\n");
+log_error:
+ netdev_err(dev, "failed to change MAC address\n");
return err;
}
@@ -5753,7 +6115,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
/* Reconfigure BM to the original MTU */
err = mvpp2_bm_update_mtu(dev, dev->mtu);
if (err)
- goto error;
+ goto log_error;
}
mvpp2_stop_dev(port);
@@ -5767,7 +6129,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
/* Reconfigure BM to the original MTU */
err = mvpp2_bm_update_mtu(dev, dev->mtu);
if (err)
- goto error;
+ goto log_error;
out_start:
mvpp2_start_dev(port);
@@ -5775,9 +6137,8 @@ out_start:
mvpp2_ingress_enable(port);
return 0;
-
-error:
- netdev_err(dev, "fail to change MTU\n");
+log_error:
+ netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -5946,7 +6307,7 @@ static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
err_clean_rxqs:
mvpp2_cleanup_rxqs(port);
err_out:
- netdev_err(dev, "fail to change ring parameters");
+	netdev_err(dev, "failed to change ring parameters\n");
return err;
}
@@ -5975,16 +6336,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-/* Driver initialization */
-
-static void mvpp2_port_power_up(struct mvpp2_port *port)
-{
- mvpp2_port_mii_set(port);
- mvpp2_port_periodic_xon_disable(port);
- mvpp2_port_fc_adv_enable(port);
- mvpp2_port_reset(port);
-}
-
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
@@ -5993,7 +6344,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2_txq_pcpu *txq_pcpu;
int queue, cpu, err;
- if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+ if (port->first_rxq + rxq_number >
+ MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
/* Disable port */
@@ -6061,7 +6413,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
}
/* Configure Rx queue group interrupt for this port */
- mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+ rxq_number);
+ } else {
+ u32 val;
+
+ val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
/* Create Rx descriptor rings */
for (queue = 0; queue < rxq_number; queue++) {
@@ -6103,8 +6466,7 @@ err_free_percpu:
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node,
- struct mvpp2 *priv,
- int *next_first_rxq)
+ struct mvpp2 *priv)
{
struct device_node *phy_node;
struct mvpp2_port *port;
@@ -6117,11 +6479,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
u32 id;
int features;
int phy_mode;
- int priv_common_regs_num = 2;
int err, i, cpu;
- dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
- rxq_number);
+ dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number);
if (!dev)
return -ENOMEM;
@@ -6163,16 +6523,30 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->priv = priv;
port->id = id;
- port->first_rxq = *next_first_rxq;
+ if (priv->hw_version == MVPP21)
+ port->first_rxq = port->id * rxq_number;
+ else
+ port->first_rxq = port->id * priv->max_port_rxqs;
+
port->phy_node = phy_node;
port->phy_interface = phy_mode;
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- priv_common_regs_num + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(port->base)) {
- err = PTR_ERR(port->base);
- goto err_free_irq;
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+ port->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->base)) {
+ err = PTR_ERR(port->base);
+ goto err_free_irq;
+ }
+ } else {
+ if (of_property_read_u32(port_node, "gop-port-id",
+ &port->gop_id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing gop-port-id value\n");
+ goto err_free_irq;
+ }
+
+ port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
}
/* Alloc per-cpu stats */
@@ -6187,7 +6561,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mac_from = "device tree";
ether_addr_copy(dev->dev_addr, dt_mac_addr);
} else {
- mvpp2_get_mac_address(port, hw_mac_addr);
+ if (priv->hw_version == MVPP21)
+ mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
ether_addr_copy(dev->dev_addr, hw_mac_addr);
@@ -6207,7 +6582,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to init port %d\n", id);
goto err_free_stats;
}
- mvpp2_port_power_up(port);
+
+ mvpp2_port_mii_set(port);
+ mvpp2_port_periodic_xon_disable(port);
+
+ if (priv->hw_version == MVPP21)
+ mvpp2_port_fc_adv_enable(port);
+
+ mvpp2_port_reset(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
@@ -6245,8 +6627,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- /* Increment the first Rx queue number to be used by the next port */
- *next_first_rxq += rxq_number;
priv->port_list[id] = port;
return 0;
@@ -6330,6 +6710,60 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+ u32 val, rdval, wrval;
+
+ mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+ /* AXI Bridge Configuration */
+
+ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ /* BM */
+ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+ /* Descriptors */
+ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+ /* Buffer Data */
+ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+ val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+ mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+ mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
@@ -6338,7 +6772,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
u32 val;
/* Checks for hardware constraints */
- if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+ if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
(txq_number > MVPP2_MAX_TXQ)) {
dev_err(&pdev->dev, "invalid queue size parameter\n");
return -EINVAL;
@@ -6349,14 +6783,23 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
+ if (priv->hw_version == MVPP22)
+ mvpp2_axi_init(priv);
+
/* Disable HW PHY polling */
- val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
- val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
- writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ if (priv->hw_version == MVPP21) {
+ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+ writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ } else {
+ val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ val &= ~MVPP22_SMI_POLLING_EN;
+ writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ }
/* Allocate and initialize aggregated TXQs */
priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
- sizeof(struct mvpp2_tx_queue),
+ sizeof(*priv->aggr_txqs),
GFP_KERNEL);
if (!priv->aggr_txqs)
return -ENOMEM;
@@ -6374,11 +6817,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_rx_fifo_init(priv);
/* Reset Rx queue group interrupt configuration */
- for (i = 0; i < MVPP2_MAX_PORTS; i++)
- mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
+ rxq_number);
+ continue;
+ } else {
+ u32 val;
+
+ val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
+ }
- writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
- priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+ if (priv->hw_version == MVPP21)
+ writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+ priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
/* Allow cache snoop when transmitting packets */
mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
@@ -6405,22 +6862,46 @@ static int mvpp2_probe(struct platform_device *pdev)
struct device_node *port_node;
struct mvpp2 *priv;
struct resource *res;
- int port_count, first_rxq;
+ void __iomem *base;
+ int port_count, cpu;
int err;
- priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->lms_base))
+ return PTR_ERR(priv->lms_base);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iface_base))
+ return PTR_ERR(priv->iface_base);
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->lms_base))
- return PTR_ERR(priv->lms_base);
+ for_each_present_cpu(cpu) {
+ u32 addr_space_sz;
+
+ addr_space_sz = (priv->hw_version == MVPP21 ?
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+ priv->cpu_base[cpu] = base + cpu * addr_space_sz;
+ }
+
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
if (IS_ERR(priv->pp_clk))
@@ -6438,42 +6919,70 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
+
/* Get system's tclk rate */
priv->tclk = clk_get_rate(priv->pp_clk);
+ if (priv->hw_version == MVPP22) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto err_mg_clk;
+ /* Sadly, the BM pools all share the same register to
+ * store the high 32 bits of their address. So they
+ * must all have the same high 32 bits, which forces
+ * us to restrict coherent memory to DMA_BIT_MASK(32).
+ */
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_mg_clk;
+ }
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
dev_err(&pdev->dev, "failed to initialize controller\n");
- goto err_gop_clk;
+ goto err_mg_clk;
}
port_count = of_get_available_child_count(dn);
if (port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
- goto err_gop_clk;
+ goto err_mg_clk;
}
priv->port_list = devm_kcalloc(&pdev->dev, port_count,
- sizeof(struct mvpp2_port *),
- GFP_KERNEL);
+ sizeof(*priv->port_list),
+ GFP_KERNEL);
if (!priv->port_list) {
err = -ENOMEM;
- goto err_gop_clk;
+ goto err_mg_clk;
}
/* Initialize ports */
- first_rxq = 0;
for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+ err = mvpp2_port_probe(pdev, port_node, priv);
if (err < 0)
- goto err_gop_clk;
+ goto err_mg_clk;
}
platform_set_drvdata(pdev, priv);
return 0;
+err_mg_clk:
+ if (priv->hw_version == MVPP22)
+ clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
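The 40-bit streaming / 32-bit coherent DMA split above is worth generalizing: whenever one register supplies the shared high address bits for several coherent buffers, the coherent mask must stay at 32 bits even though streaming mappings can use the wider bus. A minimal probe-time sketch of the same pattern, for a hypothetical device with that constraint:

static int foo_dma_setup(struct device *dev)
{
	int err;

	/* Streaming (per-packet) mappings may use the full 40-bit range. */
	err = dma_set_mask(dev, DMA_BIT_MASK(40));
	if (err)
		return err;

	/* All coherent buffers share one high-bits register, so keep
	 * them below 4 GB where the high 32 bits are always zero.
	 */
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}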
@@ -6506,9 +7015,10 @@ static int mvpp2_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
aggr_txq->descs,
- aggr_txq->descs_phys);
+ aggr_txq->descs_dma);
}
+ clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
@@ -6516,7 +7026,14 @@ static int mvpp2_remove(struct platform_device *pdev)
}
static const struct of_device_id mvpp2_match[] = {
- { .compatible = "marvell,armada-375-pp2" },
+ {
+ .compatible = "marvell,armada-375-pp2",
+ .data = (void *)MVPP21,
+ },
+ {
+ .compatible = "marvell,armada-7k-pp22",
+ .data = (void *)MVPP22,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
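The hw_version plumbing relies on stashing an enum in the of_device_id .data field and recovering it with of_device_get_match_data(), which keeps a single probe function for both SoC generations. A self-contained sketch of the idiom, with hypothetical names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum foo_version { FOO_V1 = 1, FOO_V2 };

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-v1", .data = (void *)FOO_V1 },
	{ .compatible = "vendor,foo-v2", .data = (void *)FOO_V2 },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* Round-trip the enum through void * via unsigned long. */
	enum foo_version ver = (enum foo_version)(unsigned long)
			       of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "hw version %d\n", ver);
	return 0;
}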
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 28cb36d9e50a..993724959a7c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -556,11 +556,11 @@ static int init_hash_table(struct pxa168_eth_private *pep)
* function. Driver can dynamically switch to them if the 1/2kB hash
* table is full.
*/
- if (pep->htpr == NULL) {
+ if (!pep->htpr) {
pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
HASH_ADDR_TABLE_SIZE,
&pep->htpr_dma, GFP_KERNEL);
- if (pep->htpr == NULL)
+ if (!pep->htpr)
return -ENOMEM;
} else {
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
@@ -1036,8 +1036,7 @@ static int rxq_init(struct net_device *dev)
int rx_desc_num = pep->rx_ring_size;
/* Allocate RX skb rings */
- pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
- GFP_KERNEL);
+ pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
if (!pep->rx_skb)
return -ENOMEM;
@@ -1096,8 +1095,7 @@ static int txq_init(struct net_device *dev)
int size = 0, i = 0;
int tx_desc_num = pep->tx_ring_size;
- pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
- GFP_KERNEL);
+ pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
if (!pep->tx_skb)
return -ENOMEM;
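The kzalloc(sizeof() * n) to kcalloc(n, sizeof()) conversions above are more than style: kcalloc() checks the multiplication for overflow and returns NULL instead of handing back a too-short allocation. The shape of the fix, as a generic sketch:

static struct sk_buff **alloc_skb_ring(int ring_size)
{
	/* kcalloc() fails cleanly if ring_size * sizeof() would wrap,
	 * where kzalloc(sizeof() * ring_size, ...) silently truncates.
	 */
	return kcalloc(ring_size, sizeof(struct sk_buff *), GFP_KERNEL);
}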
@@ -1358,7 +1356,7 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
- if (dev->phydev != NULL)
+ if (dev->phydev)
return phy_mii_ioctl(dev->phydev, ifr, cmd);
return -EOPNOTSUPP;
@@ -1503,7 +1501,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->timeout.data = (unsigned long)pep;
pep->smi_bus = mdiobus_alloc();
- if (pep->smi_bus == NULL) {
+ if (!pep->smi_bus) {
err = -ENOMEM;
goto err_netdev;
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index edb95271a4f2..5d7d94de4e00 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2657,7 +2657,7 @@ static int skge_down(struct net_device *dev)
struct skge_hw *hw = skge->hw;
int port = skge->port;
- if (skge->mem == NULL)
+ if (!skge->mem)
return 0;
netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
@@ -3718,7 +3718,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
t->csum_offs, t->csum_write, t->csum_start);
}
- seq_printf(seq, "\nRx Ring:\n");
+ seq_puts(seq, "\nRx Ring:\n");
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 2b2cc3f3ca10..1145cde2274a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4544,7 +4544,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
sky2_read32(hw, B0_Y2_SP_ICR));
if (!netif_running(dev)) {
- seq_printf(seq, "network not running\n");
+ seq_puts(seq, "network not running\n");
return 0;
}
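seq_printf() with a constant format string is replaced by seq_puts() in the skge and sky2 debugfs hunks, which skips format parsing entirely (and is what checkpatch suggests). An illustrative pairing, names hypothetical:

static void foo_show_hdr(struct seq_file *seq, int ring, u32 prod, u32 cons)
{
	/* Constant string: seq_puts() avoids the printf machinery. */
	seq_puts(seq, "\nRx Ring:\n");
	/* With arguments, seq_printf() is still the right call. */
	seq_printf(seq, "ring %d: prod=%u cons=%u\n", ring, prod, cons);
}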
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 93949139e62c..16f97552ae98 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1849,6 +1849,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* GE2, Force 1000M/FD, FC ON */
mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+ /* Tell the CDM to parse the MTK special tag from the CPU;
+ * this also works for untagged packets.
+ */
+ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
@@ -1911,10 +1917,9 @@ static int __init mtk_init(struct net_device *dev)
/* If the mac address is invalid, use random mac address */
if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
dev_err(eth->dev, "generated random MAC address %pM\n",
dev->dev_addr);
- dev->addr_assign_type = NET_ADDR_RANDOM;
}
return mtk_phy_connect(dev);
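eth_hw_addr_random() folds the removed two-step sequence into one helper: it writes a random locally-administered unicast MAC and sets dev->addr_assign_type to NET_ADDR_RANDOM, so the two can no longer drift apart. The usual fallback shape, sketched against the 4.11-era writable dev_addr:

#include <linux/etherdevice.h>

static void foo_init_mac(struct net_device *dev, const u8 *fused_addr)
{
	if (is_valid_ether_addr(fused_addr))
		ether_addr_copy(dev->dev_addr, fused_addr);
	else
		/* Random LAA unicast + NET_ADDR_RANDOM in one call. */
		eth_hw_addr_random(dev);
}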
@@ -2320,6 +2325,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->dev.of_node = np;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 08285a96ff70..3c46a3b613b9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -70,6 +70,10 @@
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL 0x1400
+#define MTK_CDMQ_STAG_EN BIT(0)
+
/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c4d714fcc7da..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -117,7 +117,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* port statistics */
"tso_packets",
"xmit_more",
- "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+ "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* pf statistics */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 61420473fe5f..94fab20ef146 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -92,7 +92,9 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return mlx4_en_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
}
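Setting tc->mqprio->hw before programming the device tells the mqprio qdisc that the traffic-class count is hardware-offloaded, so it will not try to emulate it in software. The dispatch shape, with a hypothetical foo_hw_set_num_tc():

static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* Report that the TC mapping itself lives in hardware. */
	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return foo_hw_set_num_tc(dev, tc->mqprio->num_tc);
}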
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 9166d90e7328..e0eb695318e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -213,6 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
+ priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
@@ -223,6 +224,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+ priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 867292880c07..aa074e57ce06 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -50,173 +50,62 @@
#include "mlx4_en.h"
-static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *page_alloc,
- const struct mlx4_en_frag_info *frag_info,
- gfp_t _gfp)
+static int mlx4_alloc_page(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frag,
+ gfp_t gfp)
{
- int order;
struct page *page;
dma_addr_t dma;
- for (order = frag_info->order; ;) {
- gfp_t gfp = _gfp;
-
- if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
- page = alloc_pages(gfp, order);
- if (likely(page))
- break;
- if (--order < 0 ||
- ((PAGE_SIZE << order) < frag_info->frag_size))
- return -ENOMEM;
- }
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
- frag_info->dma_dir);
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ return -ENOMEM;
+ dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- put_page(page);
+ __free_page(page);
return -ENOMEM;
}
- page_alloc->page_size = PAGE_SIZE << order;
- page_alloc->page = page;
- page_alloc->dma = dma;
- page_alloc->page_offset = 0;
- /* Not doing get_page() for each frag is a big win
- * on asymetric workloads. Note we can not use atomic_set().
- */
- page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
+ frag->page = page;
+ frag->dma = dma;
+ frag->page_offset = priv->rx_headroom;
return 0;
}
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
- struct mlx4_en_rx_alloc *ring_alloc,
gfp_t gfp)
{
- struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
- const struct mlx4_en_frag_info *frag_info;
- struct page *page;
int i;
- for (i = 0; i < priv->num_frags; i++) {
- frag_info = &priv->frag_info[i];
- page_alloc[i] = ring_alloc[i];
- page_alloc[i].page_offset += frag_info->frag_stride;
-
- if (page_alloc[i].page_offset + frag_info->frag_stride <=
- ring_alloc[i].page_size)
- continue;
-
- if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
- frag_info, gfp)))
- goto out;
- }
-
- for (i = 0; i < priv->num_frags; i++) {
- frags[i] = ring_alloc[i];
- frags[i].page_offset += priv->frag_info[i].rx_headroom;
- rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
- frags[i].page_offset);
- ring_alloc[i] = page_alloc[i];
- }
-
- return 0;
-
-out:
- while (i--) {
- if (page_alloc[i].page != ring_alloc[i].page) {
- dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].page_size,
- priv->frag_info[i].dma_dir);
- page = page_alloc[i].page;
- /* Revert changes done by mlx4_alloc_pages */
- page_ref_sub(page, page_alloc[i].page_size /
- priv->frag_info[i].frag_stride - 1);
- put_page(page);
+ for (i = 0; i < priv->num_frags; i++, frags++) {
+ if (!frags->page) {
+ if (mlx4_alloc_page(priv, frags, gfp))
+ return -ENOMEM;
+ ring->rx_alloc_pages++;
}
- }
- return -ENOMEM;
-}
-
-static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frags,
- int i)
-{
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
- u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
-
-
- if (next_frag_end > frags[i].page_size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
- frag_info->dma_dir);
-
- if (frags[i].page)
- put_page(frags[i].page);
-}
-
-static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
-{
- int i;
- struct mlx4_en_rx_alloc *page_alloc;
-
- for (i = 0; i < priv->num_frags; i++) {
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
- if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
- frag_info, GFP_KERNEL | __GFP_COLD))
- goto out;
-
- en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
- i, ring->page_alloc[i].page_size,
- page_ref_count(ring->page_alloc[i].page));
+ rx_desc->data[i].addr = cpu_to_be64(frags->dma +
+ frags->page_offset);
}
return 0;
-
-out:
- while (i--) {
- struct page *page;
-
- page_alloc = &ring->page_alloc[i];
- dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size,
- priv->frag_info[i].dma_dir);
- page = page_alloc->page;
- /* Revert changes done by mlx4_alloc_pages */
- page_ref_sub(page, page_alloc->page_size /
- priv->frag_info[i].frag_stride - 1);
- put_page(page);
- page_alloc->page = NULL;
- }
- return -ENOMEM;
}
-static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frag)
{
- struct mlx4_en_rx_alloc *page_alloc;
- int i;
-
- for (i = 0; i < priv->num_frags; i++) {
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
- page_alloc = &ring->page_alloc[i];
- en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
- i, page_count(page_alloc->page));
-
- dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, frag_info->dma_dir);
- while (page_alloc->page_offset + frag_info->frag_stride <
- page_alloc->page_size) {
- put_page(page_alloc->page);
- page_alloc->page_offset += frag_info->frag_stride;
- }
- page_alloc->page = NULL;
+ if (frag->page) {
+ dma_unmap_page(priv->ddev, frag->dma,
+ PAGE_SIZE, priv->dma_dir);
+ __free_page(frag->page);
}
+ /* We need to clear all fields; otherwise a change of priv->log_rx_info
+ * could lead to seeing garbage later in frag->page.
+ */
+ memset(frag, 0, sizeof(*frag));
}
-static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
@@ -248,18 +137,23 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
-
if (ring->page_cache.index > 0) {
- frags[0] = ring->page_cache.buf[--ring->page_cache.index];
- rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
- frags[0].page_offset);
+ /* XDP uses a single page per frame */
+ if (!frags->page) {
+ ring->page_cache.index--;
+ frags->page = ring->page_cache.buf[ring->page_cache.index].page;
+ frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
+ }
+ frags->page_offset = XDP_PACKET_HEADROOM;
+ rx_desc->data[0].addr = cpu_to_be64(frags->dma +
+ XDP_PACKET_HEADROOM);
return 0;
}
- return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+ return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
-static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
return ring->prod == ring->cons;
}
@@ -269,7 +163,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
-static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+/* slow path */
+static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
@@ -279,7 +174,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags, nr);
+ mlx4_en_free_frag(priv, frags + nr);
}
}
@@ -335,12 +230,12 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
- while (!mlx4_en_is_ring_empty(ring)) {
- index = ring->cons & ring->size_mask;
+ for (index = 0; index < ring->size; index++) {
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
- ++ring->cons;
}
+ ring->cons = 0;
+ ring->prod = 0;
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
@@ -392,9 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vmalloc_node(tmp, node);
+ ring->rx_info = vzalloc_node(tmp, node);
if (!ring->rx_info) {
- ring->rx_info = vmalloc(tmp);
+ ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
goto err_ring;
@@ -464,16 +359,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
-
- /* Initialize page allocators */
- err = mlx4_en_init_allocator(priv, ring);
- if (err) {
- en_err(priv, "Failed initializing ring allocator\n");
- if (ring->stride <= TXBB_SIZE)
- ring->buf -= TXBB_SIZE;
- ring_ind--;
- goto err_allocator;
- }
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -493,11 +378,9 @@ err_buffers:
mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
-err_allocator:
while (ring_ind >= 0) {
if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
@@ -537,7 +420,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
if (cache->index >= MLX4_EN_CACHE_SIZE)
return false;
- cache->buf[cache->index++] = *frame;
+ cache->buf[cache->index].page = frame->page;
+ cache->buf[cache->index].dma = frame->dma;
+ cache->index++;
return true;
}
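With page_offset and page_size now constants in the XDP path (XDP_PACKET_HEADROOM and PAGE_SIZE), the recycle cache shrinks to bare (page, dma) pairs. A minimal sketch of such a fixed-capacity cache, names hypothetical:

struct foo_page_cache {
	u32 index;
	struct {
		struct page *page;
		dma_addr_t dma;
	} buf[128];
};

/* Push one still-DMA-mapped page; on false the caller unmaps and frees. */
static bool foo_cache_put(struct foo_page_cache *c,
			  struct page *page, dma_addr_t dma)
{
	if (c->index >= ARRAY_SIZE(c->buf))
		return false;
	c->buf[c->index].page = page;
	c->buf[c->index].dma = dma;
	c->index++;
	return true;
}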
@@ -567,136 +452,91 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
int i;
for (i = 0; i < ring->page_cache.index; i++) {
- struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
-
- dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
- priv->frag_info[0].dma_dir);
- put_page(frame->page);
+ dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(ring->page_cache.buf[i].page);
}
ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, ring);
}
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
int length)
{
- struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
- struct mlx4_en_frag_info *frag_info;
- int nr;
+ const struct mlx4_en_frag_info *frag_info = priv->frag_info;
+ unsigned int truesize = 0;
+ int nr, frag_size;
+ struct page *page;
dma_addr_t dma;
+ bool release;
/* Collect used fragments while replacing them in the HW descriptors */
- for (nr = 0; nr < priv->num_frags; nr++) {
- frag_info = &priv->frag_info[nr];
- if (length <= frag_info->frag_prefix_size)
- break;
- if (unlikely(!frags[nr].page))
+ for (nr = 0;; frags++) {
+ frag_size = min_t(int, length, frag_info->frag_size);
+
+ page = frags->page;
+ if (unlikely(!page))
goto fail;
- dma = be64_to_cpu(rx_desc->data[nr].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
- DMA_FROM_DEVICE);
+ dma = frags->dma;
+ dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
+ frag_size, priv->dma_dir);
+
+ __skb_fill_page_desc(skb, nr, page, frags->page_offset,
+ frag_size);
- __skb_fill_page_desc(skb, nr, frags[nr].page,
- frags[nr].page_offset,
- frag_info->frag_size);
+ truesize += frag_info->frag_stride;
+ if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ frags->page_offset ^= PAGE_SIZE / 2;
+ release = page_count(page) != 1 ||
+ page_is_pfmemalloc(page) ||
+ page_to_nid(page) != numa_mem_id();
+ } else {
+ u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
- skb->truesize += frag_info->frag_stride;
- frags[nr].page = NULL;
+ frags->page_offset += sz_align;
+ release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
+ }
+ if (release) {
+ dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
+ frags->page = NULL;
+ } else {
+ page_ref_inc(page);
+ }
+
+ nr++;
+ length -= frag_size;
+ if (!length)
+ break;
+ frag_info++;
}
- /* Adjust size of last fragment to match actual length */
- if (nr > 0)
- skb_frag_size_set(&skb_frags_rx[nr - 1],
- length - priv->frag_info[nr - 1].frag_prefix_size);
+ skb->truesize += truesize;
return nr;
fail:
while (nr > 0) {
nr--;
- __skb_frag_unref(&skb_frags_rx[nr]);
+ __skb_frag_unref(skb_shinfo(skb)->frags + nr);
}
return 0;
}
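The rewritten completion path gets page reuse without a dedicated allocator: for half-page strides it XORs page_offset over to the other half and keeps the page only while this driver holds the sole reference; otherwise the page is unmapped and donated to the skb. The reuse test, condensed into a sketch under the same half-page assumption:

/* Flip to the other half and decide whether the page can be reused. */
static bool foo_reuse_half_page(struct page *page, u32 *page_offset)
{
	*page_offset ^= PAGE_SIZE / 2;

	/* Reuse only if we hold the sole reference and the page is
	 * neither a pfmemalloc emergency page nor on a remote node.
	 */
	return page_count(page) == 1 &&
	       !page_is_pfmemalloc(page) &&
	       page_to_nid(page) == numa_mem_id();
}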
-
-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct mlx4_en_rx_alloc *frags,
- unsigned int length)
-{
- struct sk_buff *skb;
- void *va;
- int used_frags;
- dma_addr_t dma;
-
- skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (unlikely(!skb)) {
- en_dbg(RX_ERR, priv, "Failed allocating skb\n");
- return NULL;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- skb->len = length;
-
- /* Get pointer to first fragment so we could copy the headers into the
- * (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].page_offset;
-
- if (length <= SMALL_PACKET_SIZE) {
- /* We are copying all relevant data to the skb - temporarily
- * sync buffers for the copy */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, length,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, va, length);
- skb->tail += length;
- } else {
- unsigned int pull_len;
-
- /* Move relevant fragments to skb */
- used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
- skb, length);
- if (unlikely(!used_frags)) {
- kfree_skb(skb);
- return NULL;
- }
- skb_shinfo(skb)->nr_frags = used_frags;
-
- pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
- /* Copy headers into the skb linear buffer */
- memcpy(skb->data, va, pull_len);
- skb->tail += pull_len;
-
- /* Skip headers in first fragment */
- skb_shinfo(skb)->frags[0].page_offset += pull_len;
-
- /* Adjust size of first fragment */
- skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
- skb->data_len = length - pull_len;
- }
- return skb;
-}
-
-static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
+ const unsigned char *data = va + ETH_HLEN;
int i;
- int offset = ETH_HLEN;
- for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
- if (*(skb->data + offset) != (unsigned char) (i & 0xff))
- goto out_loopback;
+ for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
+ if (data[i] != (unsigned char)i)
+ return;
}
/* Loopback found */
priv->loopback_ok = 1;
-
-out_loopback:
- dev_kfree_skb_any(skb);
}
static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
@@ -801,7 +641,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
- struct mlx4_en_rx_desc *rx_desc;
struct bpf_prog *xdp_prog;
int doorbell_pending;
struct sk_buff *skb;
@@ -834,10 +673,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
+ void *va;
frags = ring->rx_info + (index << priv->log_rx_info);
- rx_desc = ring->buf + (index << ring->log_stride);
-
+ va = page_address(frags[0].page) + frags[0].page_offset;
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -860,16 +699,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* and not performing the selftest or flb disabled
*/
if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
- struct ethhdr *ethh;
+ const struct ethhdr *ethh = va;
dma_addr_t dma;
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
@@ -887,13 +724,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
}
+ if (unlikely(priv->validate_loopback)) {
+ validate_loopback(priv, va);
+ goto next;
+ }
+
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
- l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
/* A bpf program gets first chance to drop the packet. It may
* read bytes but not past the end of the frag.
@@ -904,13 +744,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = page_address(frags[0].page);
- xdp.data = xdp.data_hard_start + frags[0].page_offset;
+ xdp.data_hard_start = va - frags[0].page_offset;
+ xdp.data = va;
xdp.data_end = xdp.data + length;
orig_data = xdp.data;
@@ -920,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length = xdp.data_end - xdp.data;
frags[0].page_offset = xdp.data -
xdp.data_hard_start;
+ va = xdp.data;
}
switch (act) {
@@ -928,8 +769,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
length, cq->ring,
- &doorbell_pending)))
- goto consumed;
+ &doorbell_pending))) {
+ frags[0].page = NULL;
+ goto next;
+ }
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
@@ -939,8 +782,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
- if (likely(mlx4_en_rx_recycle(ring, frags)))
- goto consumed;
goto next;
}
}
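Building the xdp_buff straight from the precomputed virtual address keeps the headroom bookkeeping in one place: data_hard_start is the page start, and any head adjustment made by the program is folded back into page_offset. The setup and fixup, as a sketch:

static u32 foo_run_xdp(struct bpf_prog *prog, void *va,
		       u32 *page_offset, u32 *length)
{
	struct xdp_buff xdp;
	u32 act;

	/* Headroom lives between the page start and xdp.data. */
	xdp.data_hard_start = va - *page_offset;
	xdp.data = va;
	xdp.data_end = xdp.data + *length;

	act = bpf_prog_run_xdp(prog, &xdp);

	/* The program may move the head; fold that back into our state. */
	*length = xdp.data_end - xdp.data;
	*page_offset = xdp.data - xdp.data_hard_start;
	return act;
}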
@@ -948,129 +789,51 @@ xdp_drop_no_cnt:
ring->bytes += length;
ring->packets++;
+ skb = napi_get_frags(&cq->napi);
+ if (!skb)
+ goto next;
+
+ if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ timestamp);
+ }
+ skb_record_rx_queue(skb, cq->ring);
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe->checksum == cpu_to_be16(0xffff)) {
ip_summed = CHECKSUM_UNNECESSARY;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ if (l2_tunnel)
+ skb->csum_level = 1;
ring->csum_ok++;
} else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ goto csum_none;
}
} else {
if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
MLX4_CQE_STATUS_IPV6))) {
- ip_summed = CHECKSUM_COMPLETE;
- ring->csum_complete++;
+ if (check_csum(cqe, skb, va, dev->features)) {
+ goto csum_none;
+ } else {
+ ip_summed = CHECKSUM_COMPLETE;
+ ring->csum_complete++;
+ }
} else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ goto csum_none;
}
}
} else {
+csum_none:
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
}
-
- /* This packet is eligible for GRO if it is:
- * - DIX Ethernet (type interpretation)
- * - TCP/IP (v4)
- * - without IP options
- * - not an IP fragment
- */
- if (dev->features & NETIF_F_GRO) {
- struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
- if (!gro_skb)
- goto next;
-
- nr = mlx4_en_complete_rx_desc(priv,
- rx_desc, frags, gro_skb,
- length);
- if (!nr)
- goto next;
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
- if (check_csum(cqe, gro_skb, va,
- dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
- ring->csum_complete--;
- }
- }
-
- skb_shinfo(gro_skb)->nr_frags = nr;
- gro_skb->len = length;
- gro_skb->data_len = length;
- gro_skb->ip_summed = ip_summed;
-
- if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
- gro_skb->csum_level = 1;
-
- if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- u16 vid = be16_to_cpu(cqe->sl_vid);
-
- __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
- } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_SVLAN_PRESENT_MASK) &&
- (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
- __vlan_hwaccel_put_tag(gro_skb,
- htons(ETH_P_8021AD),
- be16_to_cpu(cqe->sl_vid));
- }
-
- if (dev->features & NETIF_F_RXHASH)
- skb_set_hash(gro_skb,
- be32_to_cpu(cqe->immed_rss_invalid),
- (ip_summed == CHECKSUM_UNNECESSARY) ?
- PKT_HASH_TYPE_L4 :
- PKT_HASH_TYPE_L3);
-
- skb_record_rx_queue(gro_skb, cq->ring);
-
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev,
- skb_hwtstamps(gro_skb),
- timestamp);
- }
-
- napi_gro_frags(&cq->napi);
- goto next;
- }
-
- /* GRO not possible, complete processing here */
- skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (unlikely(!skb)) {
- ring->dropped++;
- goto next;
- }
-
- if (unlikely(priv->validate_loopback)) {
- validate_loopback(priv, skb);
- goto next;
- }
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- if (check_csum(cqe, skb, skb->data, dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_complete--;
- ring->csum_none++;
- }
- }
-
skb->ip_summed = ip_summed;
- skb->protocol = eth_type_trans(skb, dev);
- skb_record_rx_queue(skb, cq->ring);
-
- if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
- skb->csum_level = 1;
-
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
@@ -1078,36 +841,36 @@ xdp_drop_no_cnt:
PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_L3);
- if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_CVLAN_PRESENT_MASK) &&
+
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
- else if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->sl_vid));
+ else if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_STAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
be16_to_cpu(cqe->sl_vid));
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
- timestamp);
+ nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
+ if (likely(nr)) {
+ skb_shinfo(skb)->nr_frags = nr;
+ skb->len = length;
+ skb->data_len = length;
+ napi_gro_frags(&cq->napi);
+ } else {
+ skb->vlan_tci = 0;
+ skb_clear_hash(skb);
}
-
- napi_gro_receive(&cq->napi, skb);
next:
- for (nr = 0; nr < priv->num_frags; nr++)
- mlx4_en_free_frag(priv, frags, nr);
-
-consumed:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
if (++polled == budget)
- goto out;
+ break;
}
-out:
rcu_read_unlock();
if (polled) {
@@ -1178,13 +941,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
return done;
}
-static const int frag_sizes[] = {
- FRAG_SZ0,
- FRAG_SZ1,
- FRAG_SZ2,
- FRAG_SZ3
-};
-
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1195,33 +951,43 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
* This only works when num_frags == 1.
*/
if (priv->tx_ring_num[TX_XDP]) {
- priv->frag_info[0].order = 0;
priv->frag_info[0].frag_size = eff_mtu;
- priv->frag_info[0].frag_prefix_size = 0;
/* This will gain efficient xdp frame recycling at the
* expense of more costly truesize accounting
*/
priv->frag_info[0].frag_stride = PAGE_SIZE;
- priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
- priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
+ priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+ priv->rx_headroom = XDP_PACKET_HEADROOM;
i = 1;
} else {
- int buf_size = 0;
+ int frag_size_max = 2048, buf_size = 0;
+
+ /* Should not happen; if it does, fall back to full-page fragments. */
+ if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
+ frag_size_max = PAGE_SIZE;
while (buf_size < eff_mtu) {
- priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER;
- priv->frag_info[i].frag_size =
- (eff_mtu > buf_size + frag_sizes[i]) ?
- frag_sizes[i] : eff_mtu - buf_size;
- priv->frag_info[i].frag_prefix_size = buf_size;
- priv->frag_info[i].frag_stride =
- ALIGN(priv->frag_info[i].frag_size,
- SMP_CACHE_BYTES);
- priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
- priv->frag_info[i].rx_headroom = 0;
- buf_size += priv->frag_info[i].frag_size;
+ int frag_stride, frag_size = eff_mtu - buf_size;
+ int pad, nb;
+
+ if (i < MLX4_EN_MAX_RX_FRAGS - 1)
+ frag_size = min(frag_size, frag_size_max);
+
+ priv->frag_info[i].frag_size = frag_size;
+ frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
+ /* We can only pack two 1536-byte frames on a 4K page.
+ * Each frame therefore consumes more bytes (truesize).
+ */
+ nb = PAGE_SIZE / frag_stride;
+ pad = (PAGE_SIZE - nb * frag_stride) / nb;
+ pad &= ~(SMP_CACHE_BYTES - 1);
+ priv->frag_info[i].frag_stride = frag_stride + pad;
+
+ buf_size += frag_size;
i++;
}
+ priv->dma_dir = PCI_DMA_FROMDEVICE;
+ priv->rx_headroom = 0;
}
priv->num_frags = i;
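The pad computation spreads a page's leftover bytes across the fragments that share it, so truesize accounting stays honest. Worked through for 1536-byte fragments on a 4K page: frag_stride = ALIGN(1536, 64) = 1536, nb = 4096 / 1536 = 2, pad = (4096 - 2 * 1536) / 2 = 512 (already a multiple of SMP_CACHE_BYTES), so each fragment is accounted as 1536 + 512 = 2048 bytes and exactly two fit per page, as the comment above notes.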
@@ -1232,10 +998,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
- " frag:%d - size:%d prefix:%d stride:%d\n",
+ " frag:%d - size:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
- priv->frag_info[i].frag_prefix_size,
priv->frag_info[i].frag_stride);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 95290e1fc9fe..17112faafbcc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,14 +81,11 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
- bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
- gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
- priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -111,9 +108,6 @@ mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
- if (gro_enabled)
- priv->dev->features |= NETIF_F_GRO;
-
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ed42199d3f1..3ba89bc43d74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -354,13 +354,11 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc frame = {
.page = tx_info->page,
.dma = tx_info->map0_dma,
- .page_offset = XDP_PACKET_HEADROOM,
- .page_size = PAGE_SIZE,
};
if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->frag_info[0].dma_dir);
+ PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
}
@@ -980,8 +978,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tso_packets++;
- i = ((skb->len - lso_header_size) / shinfo->gso_size) +
- !!((skb->len - lso_header_size) % shinfo->gso_size);
+ i = shinfo->gso_segs;
tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
ring->packets += i;
} else {
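Reading shinfo->gso_segs is equivalent to the removed open-coded computation, since the stack already stores gso_segs = DIV_ROUND_UP(payload_len, gso_size) for a TSO skb. For example, a 9000-byte TCP payload with gso_size 1460 gives 9000 / 1460 = 6 remainder 240, hence 7 segments from either expression, but without redoing a division and a modulo per packet.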
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3629ce11a68b..39f401aa3047 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -102,17 +102,6 @@
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
-#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \
- PAGE_ALLOC_COSTLY_ORDER)
-
-/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
- * and 4K allocations) */
-enum {
- FRAG_SZ0 = 1536 - NET_IP_ALIGN,
- FRAG_SZ1 = 4096,
- FRAG_SZ2 = 4096,
- FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
-};
#define MLX4_EN_MAX_RX_FRAGS 4
/* Maximum ring sizes */
@@ -264,13 +253,16 @@ struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
u32 page_offset;
- u32 page_size;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+
struct mlx4_en_page_cache {
u32 index;
- struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+ struct {
+ struct page *page;
+ dma_addr_t dma;
+ } buf[MLX4_EN_CACHE_SIZE];
};
struct mlx4_en_priv;
@@ -335,7 +327,6 @@ struct mlx4_en_rx_desc {
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
- struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
u32 size; /* number of Rx descs */
u32 actual_size;
u32 size_mask;
@@ -355,6 +346,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long rx_alloc_pages;
unsigned long xdp_drop;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
@@ -472,11 +464,7 @@ struct mlx4_en_mc_list {
struct mlx4_en_frag_info {
u16 frag_size;
- u16 frag_prefix_size;
u32 frag_stride;
- enum dma_data_direction dma_dir;
- u16 order;
- u16 rx_headroom;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -584,8 +572,10 @@ struct mlx4_en_priv {
u32 rx_ring_num;
u32 rx_skb_size;
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
- u16 num_frags;
- u16 log_rx_info;
+ u8 num_frags;
+ u8 log_rx_info;
+ u8 dma_dir;
+ u16 rx_headroom;
struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 48641cb0367f..926f3c3f3665 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -37,7 +37,7 @@ struct mlx4_en_port_stats {
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
- unsigned long rx_alloc_failed;
+ unsigned long rx_alloc_pages;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long rx_chksum_complete;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d8d5d161b8c7..4aa29ee93013 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2749,7 +2749,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
int err;
int index = vhcr->in_modifier;
struct res_mtt *mtt;
- struct res_mpt *mpt;
+ struct res_mpt *mpt = NULL;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 117170014e88..a84b652f9b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,3 +31,10 @@ config MLX5_CORE_EN_DCB
This flag depends on the kernel's DCB support.
If unsure, set to Y
+
+config MLX5_CORE_IPOIB
+ bool "Mellanox Technologies ConnectX-4 IPoIB offloads support"
+ depends on MLX5_CORE_EN
+ default y
+ ---help---
+ MLX5 IPoIB offloads & acceleration support.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9f43beb86250..9e644615f07a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -11,3 +11,5 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
+
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a380353a78c2..5bdaf3d545b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -279,6 +279,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_XRC_SRQ:
case MLX5_CMD_OP_DESTROY_DCT:
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
case MLX5_CMD_OP_DEALLOC_PD:
case MLX5_CMD_OP_DEALLOC_UAR:
case MLX5_CMD_OP_DETACH_FROM_MCG:
@@ -305,8 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
- case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
+ case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -363,6 +364,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_Q_COUNTER:
case MLX5_CMD_OP_SET_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
case MLX5_CMD_OP_ALLOC_PD:
case MLX5_CMD_OP_ALLOC_UAR:
case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -414,10 +419,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
- case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -501,6 +503,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
+ MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+ MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
MLX5_COMMAND_STR_CASE(ALLOC_PD);
MLX5_COMMAND_STR_CASE(DEALLOC_PD);
MLX5_COMMAND_STR_CASE(ALLOC_UAR);
@@ -576,12 +584,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
- MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
- MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
+ MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d9490cd2db1..0099a3e397bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -37,6 +37,7 @@
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
@@ -111,18 +112,13 @@
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
-#define MLX5E_SQ_BF_BUDGET 16
#define MLX5E_ICOSQ_MAX_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_IHS_DS_COUNT \
- DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT \
((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-#define MLX5E_XDP_TX_WQEBBS \
- DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
#define MLX5E_NUM_MAIN_GROUPS 9
@@ -158,6 +154,14 @@ static inline int mlx5_max_log_rq_size(int wq_type)
}
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return is_kdump_kernel() ?
+ MLX5E_MIN_NUM_CHANNELS :
+ min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -187,15 +191,15 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
};
-#define MLX5E_SET_PFLAG(priv, pflag, enable) \
+#define MLX5E_SET_PFLAG(params, pflag, enable) \
do { \
if (enable) \
- (priv)->params.pflags |= (pflag); \
+ (params)->pflags |= (pflag); \
else \
- (priv)->params.pflags &= ~(pflag); \
+ (params)->pflags &= ~(pflag); \
} while (0)
-#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -218,7 +222,6 @@ struct mlx5e_params {
bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
- u16 min_rx_wqes;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -227,9 +230,11 @@ struct mlx5e_params {
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
+ bool scatter_fcs_en;
bool rx_am_enabled;
u32 lro_timeout;
u32 pflags;
+ struct bpf_prog *xdp_prog;
};
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -285,7 +290,6 @@ struct mlx5e_cq {
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
- struct mlx5e_priv *priv;
/* cqe decompression */
struct mlx5_cqe64 title;
@@ -295,22 +299,163 @@ struct mlx5e_cq {
u16 decmprs_wqe_counter;
/* control */
+ struct mlx5_core_dev *mdev;
struct mlx5_frag_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
-struct mlx5e_rq;
-typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe);
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
- u16 ix);
+struct mlx5e_tx_wqe_info {
+ struct sk_buff *skb;
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+enum mlx5e_dma_map_type {
+ MLX5E_DMA_MAP_SINGLE,
+ MLX5E_DMA_MAP_PAGE
+};
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+ enum mlx5e_dma_map_type type;
+};
+
+enum {
+ MLX5E_SQ_STATE_ENABLED,
+};
+
+struct mlx5e_sq_wqe_info {
+ u8 opcode;
+ u8 num_wqebbs;
+};
+
+struct mlx5e_txqsq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* write@xmit, read@completion */
+ struct {
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } db;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u16 max_inline;
+ u8 min_inline_mode;
+ u16 edge;
+ struct device *pdev;
+ struct mlx5e_tstamp *tstamp;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_channel *channel;
+ int txq_ix;
+ u32 rate_limit;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_xdpsq {
+ /* data path */
+
+ /* dirtied @rx completion */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5e_cq cq;
+
+ /* write@xmit, read@completion */
+ struct {
+ struct mlx5e_dma_info *di;
+ bool doorbell;
+ } db;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+ struct device *pdev;
+ __be32 mkey_be;
+ u8 min_inline_mode;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_icosq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u16 prev_cc;
-typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+ struct mlx5e_cq cq;
+
+ /* write@xmit, read@completion */
+ struct {
+ struct mlx5e_sq_wqe_info *ico_wqe;
+ } db;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+ u16 edge;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+static inline bool
+mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
+{
+ return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
+}
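mlx5e_wqc_has_room_for() is the usual power-of-two ring occupancy test: sz_m1 masks (cc - pc) down to the free-slot count, and cc == pc needs the explicit special case because it masks to zero for an empty ring just as it would for a full one. Worked example for a 256-entry ring (sz_m1 = 255): pc = 300, cc = 60 gives (60 - 300) & 255 = 16 free WQEBBs, so any request with n <= 16 fits.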
struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
};
+struct mlx5e_umr_dma_info {
+ __be64 *mtt;
+ dma_addr_t mtt_addr;
+ struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+ struct mlx5e_umr_wqe wqe;
+};
+
+struct mlx5e_mpw_info {
+ struct mlx5e_umr_dma_info umr;
+ u16 consumed_strides;
+ u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
struct mlx5e_rx_am_stats {
int ppms; /* packets per msec */
int epms; /* events per msec */
@@ -347,6 +492,11 @@ struct mlx5e_page_cache {
struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+				       struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+				  u16 ix);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
@@ -381,7 +531,10 @@ struct mlx5e_rq {
u16 rx_headroom;
struct mlx5e_rx_am am; /* Adaptive Moderation */
+
+ /* XDP */
struct bpf_prog *xdp_prog;
+ struct mlx5e_xdpsq xdpsq;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -390,118 +543,10 @@ struct mlx5e_rq {
u32 mpwqe_num_strides;
u32 rqn;
struct mlx5e_channel *channel;
- struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
} ____cacheline_aligned_in_smp;
-struct mlx5e_umr_dma_info {
- __be64 *mtt;
- dma_addr_t mtt_addr;
- struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
- struct mlx5e_umr_wqe wqe;
-};
-
-struct mlx5e_mpw_info {
- struct mlx5e_umr_dma_info umr;
- u16 consumed_strides;
- u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
-struct mlx5e_tx_wqe_info {
- u32 num_bytes;
- u8 num_wqebbs;
- u8 num_dma;
-};
-
-enum mlx5e_dma_map_type {
- MLX5E_DMA_MAP_SINGLE,
- MLX5E_DMA_MAP_PAGE
-};
-
-struct mlx5e_sq_dma {
- dma_addr_t addr;
- u32 size;
- enum mlx5e_dma_map_type type;
-};
-
-enum {
- MLX5E_SQ_STATE_ENABLED,
- MLX5E_SQ_STATE_BF_ENABLE,
-};
-
-struct mlx5e_sq_wqe_info {
- u8 opcode;
- u8 num_wqebbs;
-};
-
-enum mlx5e_sq_type {
- MLX5E_SQ_TXQ,
- MLX5E_SQ_ICO,
- MLX5E_SQ_XDP
-};
-
-struct mlx5e_sq {
- /* data path */
-
- /* dirtied @completion */
- u16 cc;
- u32 dma_fifo_cc;
-
- /* dirtied @xmit */
- u16 pc ____cacheline_aligned_in_smp;
- u32 dma_fifo_pc;
- u16 bf_offset;
- u16 prev_cc;
- u8 bf_budget;
- struct mlx5e_sq_stats stats;
-
- struct mlx5e_cq cq;
-
- /* pointers to per tx element info: write@xmit, read@completion */
- union {
- struct {
- struct sk_buff **skb;
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
- } txq;
- struct mlx5e_sq_wqe_info *ico_wqe;
- struct {
- struct mlx5e_sq_wqe_info *wqe_info;
- struct mlx5e_dma_info *di;
- bool doorbell;
- } xdp;
- } db;
-
- /* read only */
- struct mlx5_wq_cyc wq;
- u32 dma_fifo_mask;
- void __iomem *uar_map;
- struct netdev_queue *txq;
- u32 sqn;
- u16 bf_buf_size;
- u16 max_inline;
- u8 min_inline_mode;
- u16 edge;
- struct device *pdev;
- struct mlx5e_tstamp *tstamp;
- __be32 mkey_be;
- unsigned long state;
-
- /* control path */
- struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_sq_bfreg bfreg;
- struct mlx5e_channel *channel;
- int tc;
- u32 rate_limit;
- u8 type;
-} ____cacheline_aligned_in_smp;
-
-static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
-{
- return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
- (sq->cc == sq->pc));
-}
-
enum channel_flags {
MLX5E_CHANNEL_NAPI_SCHED = 1,
};
@@ -509,9 +554,8 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
- struct mlx5e_sq xdp_sq;
- struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
- struct mlx5e_sq icosq; /* internal control operations */
+ struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_icosq icosq; /* internal control operations */
bool xdp;
struct napi_struct napi;
struct device *pdev;
@@ -522,10 +566,18 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tstamp *tstamp;
int ix;
int cpu;
};
+struct mlx5e_channels {
+ struct mlx5e_channel **c;
+ unsigned int num;
+ struct mlx5e_params params;
+};
+
enum mlx5e_traffic_types {
MLX5E_TT_IPV4_TCP,
MLX5E_TT_IPV6_TCP,
@@ -675,34 +727,17 @@ enum {
MLX5E_NIC_PRIO
};
-struct mlx5e_profile {
- void (*init)(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile, void *ppriv);
- void (*cleanup)(struct mlx5e_priv *priv);
- int (*init_rx)(struct mlx5e_priv *priv);
- void (*cleanup_rx)(struct mlx5e_priv *priv);
- int (*init_tx)(struct mlx5e_priv *priv);
- void (*cleanup_tx)(struct mlx5e_priv *priv);
- void (*enable)(struct mlx5e_priv *priv);
- void (*disable)(struct mlx5e_priv *priv);
- void (*update_stats)(struct mlx5e_priv *priv);
- int (*max_nch)(struct mlx5_core_dev *mdev);
- int max_tc;
-};
-
struct mlx5e_priv {
/* priv data path fields - start */
- struct mlx5e_sq **txq_to_sq_map;
- int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
- struct bpf_prog *xdp_prog;
+ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
+ int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
- struct mlx5e_channel **channel;
+ struct mlx5e_channels channels;
u32 tisn[MLX5E_MAX_NUM_TC];
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
@@ -712,7 +747,6 @@ struct mlx5e_priv {
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
- struct mlx5e_params params;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
@@ -732,9 +766,28 @@ struct mlx5e_priv {
void *ppriv;
};
+struct mlx5e_profile {
+ void (*init)(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+ void (*cleanup)(struct mlx5e_priv *priv);
+ int (*init_rx)(struct mlx5e_priv *priv);
+ void (*cleanup_rx)(struct mlx5e_priv *priv);
+ int (*init_tx)(struct mlx5e_priv *priv);
+ void (*cleanup_tx)(struct mlx5e_priv *priv);
+ void (*enable)(struct mlx5e_priv *priv);
+ void (*disable)(struct mlx5e_priv *priv);
+ void (*update_stats)(struct mlx5e_priv *priv);
+ int (*max_nch)(struct mlx5_core_dev *mdev);
+ struct {
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+ } rx_handlers;
+ int max_tc;
+};
+
void mlx5e_build_ptys2ethtool_map(void);
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -744,7 +797,9 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
@@ -792,7 +847,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -801,14 +856,40 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+struct mlx5e_redirect_rqt_param {
+ bool is_rss;
+ union {
+ u32 rqn; /* Direct RQN (Non-RSS) */
+ struct {
+ u8 hfunc;
+ struct mlx5e_channels *channels;
+ } rss; /* RSS data */
+ };
+};
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
- enum mlx5e_traffic_types tt);
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+ struct mlx5e_redirect_rqt_param rrp);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+ enum mlx5e_traffic_types tt,
+ void *tirc);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs);
+void mlx5e_close_channels(struct mlx5e_channels *chs);
+
+/* Function pointer to be used to modify HW settings while
+ * switching channels
+ */
+typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify);
+void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
+void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
+
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
u32 *indirection_rqt, int len,
int num_channels);
@@ -816,30 +897,43 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u8 rq_type);
-static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
+static inline
+struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
- u16 ofst = sq->bf_offset;
+ u16 pi = *pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
+ (*pc)++;
+
+ return wqe;
+}
+
+static inline
+void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
+ void __iomem *uar_map,
+ struct mlx5_wqe_ctrl_seg *ctrl)
+{
+ ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
- *sq->wq.db = cpu_to_be32(sq->pc);
+ *wq->db = cpu_to_be32(pc);
/* ensure doorbell record is visible to device before ringing the
* doorbell
*/
wmb();
- if (bf_sz)
- __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
- else
- mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
- /* flush the write-combining mapped buffer */
- wmb();
- sq->bf_offset ^= sq->bf_buf_size;
+ mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
@@ -895,44 +989,43 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
- bool enable_uc_lb);
-
-struct mlx5_eswitch_rep;
-int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
-void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
+
+/* common netdev helpers */
+int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
+
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
+ u32 underlay_qpn, u32 *tisn);
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
+
int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
-void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
-int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
- void *sp);
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+/* mlx5e generic netdev management API */
+struct net_device*
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+ void *ppriv);
+int mlx5e_attach_netdev(struct mlx5e_priv *priv);
+void mlx5e_detach_netdev(struct mlx5e_priv *priv);
+void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 max_channels);
-bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
-bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_H__ */
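
Aside on the new mlx5e_wqc_has_room_for() helper introduced above: cc and pc
are free-running 16-bit consumer/producer counters, so (cc - pc) wraps modulo
2^16, and masking with sz_m1 (ring size minus one, with the size a power of
two) reduces it modulo the ring size, yielding the number of free WQEBBs. An
empty ring (cc == pc) masks to zero, hence the explicit special case. A
standalone sketch of the arithmetic (purely illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

/* same expression as mlx5e_wqc_has_room_for(), on plain integers */
static int has_room(uint16_t cc, uint16_t pc, uint16_t sz_m1, uint16_t n)
{
	return ((uint16_t)(sz_m1 & (uint16_t)(cc - pc)) >= n) || (cc == pc);
}

int main(void)
{
	assert(has_room(0, 0, 7, 8));     /* empty 8-entry ring: all 8 slots free */
	assert(!has_room(0, 7, 7, 2));    /* 7 of 8 WQEBBs posted, only 1 free */
	assert(has_room(65535, 2, 7, 4)); /* wrapped counters: (65535 - 2) & 7 == 5 */
	return 0;
}
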
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 68419a01db36..c8a005326e30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -174,13 +174,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
- struct mlx5_flow_destination dest;
struct mlx5e_tir *tir = priv->indir_tir;
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
@@ -325,10 +321,16 @@ static int arfs_create_table(struct mlx5e_priv *priv,
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
+ ft->num_groups = 0;
+
+ ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
+ ft_attr.level = MLX5E_ARFS_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -469,15 +471,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
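
The MLX5_DECLARE_FLOW_ACT() declarations above replace the open-coded
struct mlx5_flow_act initializers removed from this file (en_fs.c below gets
the same treatment). The macro, added to include/linux/mlx5/fs.h in this
cycle, expands to roughly the same defaults the removed code spelled out by
hand, so each new call site no longer has to repeat them:

#define MLX5_DECLARE_FLOW_ACT(name)                                \
	struct mlx5_flow_act name = {                              \
		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,     \
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,              \
		.encap_id = 0,                                     \
	}
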
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 37e66eef6fb5..e706a87fc8b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -90,6 +90,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
+ int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
@@ -111,7 +112,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -129,7 +130,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Disable CQE compression */
netdev_warn(dev, "Disabling cqe compression");
- mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err) {
+ netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index bd898d8deda0..f1f17f7a3cd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -107,10 +107,18 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_dealloc_transport_domain;
}
+ err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
+ if (err) {
+ mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+ goto err_destroy_mkey;
+ }
+
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
return 0;
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &res->mkey);
err_dealloc_transport_domain:
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd:
@@ -122,23 +130,26 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
struct mlx5e_resources *res = &mdev->mlx5e_res;
+ mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
}
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
- bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- void *in;
+ int err = -ENOMEM;
+ u32 tirn = 0;
int inlen;
- int err = 0;
+ void *in;
+
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
- return -ENOMEM;
+ goto out;
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -147,13 +158,16 @@ int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
- err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+ tirn = tir->tirn;
+ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
if (err)
goto out;
}
out:
kvfree(in);
+ if (err)
+ netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
return err;
}
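
The mlx5e_create_mdev_resources() hunk above shows the kernel's standard
unwind-ladder idiom: each newly acquired resource adds one error label that
releases everything acquired after it, in reverse order, so the new bfreg
costs only a single extra rung (err_destroy_mkey). A minimal standalone
sketch of the idiom, with hypothetical acquire/release helpers:

#include <stdio.h>

static int  acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int create_resources(void)
{
	int err;

	if ((err = acquire("pd")))
		return err;
	if ((err = acquire("transport domain")))
		goto err_dealloc_pd;
	if ((err = acquire("mkey")))
		goto err_dealloc_transport_domain;
	if ((err = acquire("bfreg")))	/* the rung added by this patch */
		goto err_destroy_mkey;
	return 0;

err_destroy_mkey:
	release("mkey");
err_dealloc_transport_domain:
	release("transport domain");
err_dealloc_pd:
	release("pd");
	return err;
}

int main(void)
{
	return create_resources();
}
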
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a004a5a1a4c2..ce7b09d72ff6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -42,8 +42,9 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d",
- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ "%d.%d.%04d (%.16s)",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+ mdev->board_id);
strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
sizeof(drvinfo->bus_info));
}
@@ -152,12 +153,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
}
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) \
- (NUM_RQ_STATS * priv->params.num_channels * \
- test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
#define MLX5E_NUM_SQ_STATS(priv) \
- (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
- test_bit(MLX5E_STATE_OPENED, &priv->state))
+ (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
#define MLX5E_NUM_PFC_COUNTERS(priv) \
((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
NUM_PPORT_PER_PRIO_PFC_COUNTERS)
@@ -262,17 +260,17 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
return;
/* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
rq_stats_desc[j].format, i);
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
sq_stats_desc[j].format,
- priv->channeltc_to_txq_map[i][tc]);
+ priv->channel_tc2txq[i][tc]);
}
static void mlx5e_get_strings(struct net_device *dev,
@@ -303,6 +301,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *channels;
struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
unsigned long pfc_combined;
@@ -313,6 +312,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv);
+ channels = &priv->channels;
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_SW_COUNTERS; i++)
@@ -382,16 +382,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
return;
/* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
rq_stats_desc, j);
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+ data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
sq_stats_desc, j);
}
@@ -406,8 +406,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_wqe;
- stride_size = 1 << priv->params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
wqe_size = stride_size * num_strides;
packets_per_wqe = wqe_size /
@@ -427,8 +427,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_packets;
- stride_size = 1 << priv->params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
wqe_size = stride_size * num_strides;
num_packets = (1 << order_base_2(num_packets));
@@ -443,26 +443,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int rq_wq_type = priv->params.rq_wq_type;
+ int rq_wq_type = priv->channels.params.rq_wq_type;
param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << priv->params.log_rq_size);
- param->tx_pending = 1 << priv->params.log_sq_size;
+ 1 << priv->channels.params.log_rq_size);
+ param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool was_opened;
- int rq_wq_type = priv->params.rq_wq_type;
+ int rq_wq_type = priv->channels.params.rq_wq_type;
+ struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
u32 max_rq_size;
- u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -500,7 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
}
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
- if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
__func__, param->rx_pending);
@@ -522,26 +521,29 @@ static int mlx5e_set_ringparam(struct net_device *dev,
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
- if (log_rq_size == priv->params.log_rq_size &&
- log_sq_size == priv->params.log_sq_size &&
- min_rx_wqes == priv->params.min_rx_wqes)
+ if (log_rq_size == priv->channels.params.log_rq_size &&
+ log_sq_size == priv->channels.params.log_sq_size)
return 0;
mutex_lock(&priv->state_lock);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(dev);
+ new_channels.params = priv->channels.params;
+ new_channels.params.log_rq_size = log_rq_size;
+ new_channels.params.log_sq_size = log_sq_size;
- priv->params.log_rq_size = log_rq_size;
- priv->params.log_sq_size = log_sq_size;
- priv->params.min_rx_wqes = min_rx_wqes;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto unlock;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto unlock;
- if (was_opened)
- err = mlx5e_open_locked(dev);
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+unlock:
mutex_unlock(&priv->state_lock);
return err;
@@ -553,7 +555,7 @@ static void mlx5e_get_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
ch->max_combined = priv->profile->max_nch(priv->mdev);
- ch->combined_count = priv->params.num_channels;
+ ch->combined_count = priv->channels.params.num_channels;
}
static int mlx5e_set_channels(struct net_device *dev,
@@ -561,8 +563,8 @@ static int mlx5e_set_channels(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
unsigned int count = ch->combined_count;
+ struct mlx5e_channels new_channels = {};
bool arfs_enabled;
- bool was_opened;
int err = 0;
if (!count) {
@@ -571,27 +573,32 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
- if (priv->params.num_channels == count)
+ if (priv->channels.params.num_channels == count)
return 0;
mutex_lock(&priv->state_lock);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(dev);
+ new_channels.params = priv->channels.params;
+ new_channels.params.num_channels = count;
+ mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
+ /* Create fresh channels with new parameters */
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
arfs_enabled = dev->features & NETIF_F_NTUPLE;
if (arfs_enabled)
mlx5e_arfs_disable(priv);
- priv->params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
-
- if (was_opened)
- err = mlx5e_open_locked(dev);
- if (err)
- goto out;
+ /* Switch to new channels, set new parameters and close old ones */
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
if (arfs_enabled) {
err = mlx5e_arfs_enable(priv);
@@ -614,49 +621,24 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
- coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
- coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec;
- coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
+ coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec;
+ coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
+ coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
+ coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
return 0;
}
-static int mlx5e_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+static void
+mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channel *c;
- bool restart =
- !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
- bool was_opened;
- int err = 0;
int tc;
int i;
- if (!MLX5_CAP_GEN(mdev, cq_moderation))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->state_lock);
-
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened && restart) {
- mlx5e_close_locked(netdev);
- priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
- }
-
- priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
- priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
- priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
- priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-
- if (!was_opened || restart)
- goto out;
-
- for (i = 0; i < priv->params.num_channels; ++i) {
- c = priv->channel[i];
+ for (i = 0; i < priv->channels.num; ++i) {
+ struct mlx5e_channel *c = priv->channels.c[i];
for (tc = 0; tc < c->num_tc; tc++) {
mlx5_core_modify_cq_moderation(mdev,
@@ -669,11 +651,50 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
coal->rx_coalesce_usecs,
coal->rx_max_coalesced_frames);
}
+}
-out:
- if (was_opened && restart)
- err = mlx5e_open_locked(netdev);
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err = 0;
+ bool reset;
+
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->state_lock);
+ new_channels.params = priv->channels.params;
+
+ new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+ new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+ new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+ new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+ new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+	/* the interface is open */
+
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ if (!reset) {
+ mlx5e_set_priv_channels_coalesce(priv, coal);
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
+ /* open fresh channels with new coal parameters */
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+out:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -968,7 +989,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return sizeof(priv->params.toeplitz_hash_key);
+ return sizeof(priv->channels.params.toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
@@ -982,15 +1003,15 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
struct mlx5e_priv *priv = netdev_priv(netdev);
if (indir)
- memcpy(indir, priv->params.indirection_rqt,
- sizeof(priv->params.indirection_rqt));
+ memcpy(indir, priv->channels.params.indirection_rqt,
+ sizeof(priv->channels.params.indirection_rqt));
if (key)
- memcpy(key, priv->params.toeplitz_hash_key,
- sizeof(priv->params.toeplitz_hash_key));
+ memcpy(key, priv->channels.params.toeplitz_hash_key,
+ sizeof(priv->channels.params.toeplitz_hash_key));
if (hfunc)
- *hfunc = priv->params.rss_hfunc;
+ *hfunc = priv->channels.params.rss_hfunc;
return 0;
}
@@ -1006,7 +1027,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
memset(tirc, 0, ctxlen);
- mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
}
}
@@ -1030,25 +1051,37 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
- if (indir) {
- u32 rqtn = priv->indir_rqt.rqtn;
-
- memcpy(priv->params.indirection_rqt, indir,
- sizeof(priv->params.indirection_rqt));
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
- }
-
if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
- hfunc != priv->params.rss_hfunc) {
- priv->params.rss_hfunc = hfunc;
+ hfunc != priv->channels.params.rss_hfunc) {
+ priv->channels.params.rss_hfunc = hfunc;
hash_changed = true;
}
+ if (indir) {
+ memcpy(priv->channels.params.indirection_rqt, indir,
+ sizeof(priv->channels.params.indirection_rqt));
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ u32 rqtn = priv->indir_rqt.rqtn;
+ struct mlx5e_redirect_rqt_param rrp = {
+ .is_rss = true,
+ {
+ .rss = {
+ .hfunc = priv->channels.params.rss_hfunc,
+ .channels = &priv->channels,
+ },
+ },
+ };
+
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+ }
+ }
+
if (key) {
- memcpy(priv->params.toeplitz_hash_key, key,
- sizeof(priv->params.toeplitz_hash_key));
+ memcpy(priv->channels.params.toeplitz_hash_key, key,
+ sizeof(priv->channels.params.toeplitz_hash_key));
hash_changed = hash_changed ||
- priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+ priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
}
if (hash_changed)
@@ -1069,7 +1102,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = priv->params.num_channels;
+ info->data = priv->channels.params.num_channels;
break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
@@ -1097,7 +1130,7 @@ static int mlx5e_get_tunable(struct net_device *dev,
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
- *(u32 *)data = priv->params.tx_max_inline;
+ *(u32 *)data = priv->channels.params.tx_max_inline;
break;
default:
err = -EINVAL;
@@ -1113,9 +1146,11 @@ static int mlx5e_set_tunable(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- bool was_opened;
- u32 val;
+ struct mlx5e_channels new_channels = {};
int err = 0;
+ u32 val;
+
+ mutex_lock(&priv->state_lock);
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
@@ -1125,24 +1160,26 @@ static int mlx5e_set_tunable(struct net_device *dev,
break;
}
- mutex_lock(&priv->state_lock);
+ new_channels.params = priv->channels.params;
+ new_channels.params.tx_max_inline = val;
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(dev);
-
- priv->params.tx_max_inline = val;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ break;
+ }
- if (was_opened)
- err = mlx5e_open_locked(dev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ break;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
- mutex_unlock(&priv->state_lock);
break;
default:
err = -EINVAL;
break;
}
+ mutex_unlock(&priv->state_lock);
return err;
}
@@ -1442,15 +1479,15 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
bool rx_mode_changed;
u8 rx_cq_period_mode;
int err = 0;
- bool reset;
rx_cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+ rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
@@ -1459,16 +1496,51 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
if (!rx_mode_changed)
return 0;
- reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (reset)
- mlx5e_close_locked(netdev);
+ new_channels.params = priv->channels.params;
+ mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
- mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
- if (reset)
- err = mlx5e_open_locked(netdev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
- return err;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
+}
+
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+{
+ bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
+ struct mlx5e_channels new_channels = {};
+ int err = 0;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+ return new_val ? -EOPNOTSUPP : 0;
+
+ if (curr_val == new_val)
+ return 0;
+
+ new_channels.params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+
+ mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
+ new_channels.params.rq_wq_type);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
}
static int set_pflag_rx_cqe_compress(struct net_device *netdev,
@@ -1486,8 +1558,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
}
mlx5e_modify_rx_cqe_compression_locked(priv, enable);
- priv->params.rx_cqe_compress_def = enable;
- mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
+ priv->channels.params.rx_cqe_compress_def = enable;
return 0;
}
@@ -1499,7 +1570,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
bool enable = !!(wanted_flags & flag);
- u32 changes = wanted_flags ^ priv->params.pflags;
+ u32 changes = wanted_flags ^ priv->channels.params.pflags;
int err;
if (!(changes & flag))
@@ -1512,7 +1583,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
return err;
}
- MLX5E_SET_PFLAG(priv, flag, enable);
+ MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
return 0;
}
@@ -1541,7 +1612,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return priv->params.pflags;
+ return priv->channels.params.pflags;
}
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
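
Every ethtool handler converted in this file follows the same stage-then-swap
sequence in place of the old close/reopen: build the new parameters in a local
struct mlx5e_channels, commit them directly when the interface is closed, and
otherwise open the replacement channels before tearing the old ones down, so
traffic never runs on a half-configured device. Distilled into one hypothetical
wrapper (mlx5e_open_channels() and mlx5e_switch_priv_channels() are from the
diff; the wrapper itself is a sketch, and the caller must hold state_lock):

static int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
				    const struct mlx5e_params *params)
{
	struct mlx5e_channels new_channels = {};
	int err;

	new_channels.params = *params;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* closed: nothing is running, just record the parameters */
		priv->channels.params = new_channels.params;
		return 0;
	}

	/* open: bring the replacement channels up first... */
	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		return err;

	/* ...then swap them in; this also closes the old channels */
	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
	return 0;
}
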
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f2762e45c8ae..576d6787b484 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -159,14 +159,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_handle **rule_p;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -659,11 +655,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
u16 etype,
u8 proto)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -800,7 +792,7 @@ err:
return err;
}
-static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
@@ -808,14 +800,19 @@ static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
mlx5e_destroy_flow_table(&ttc->ft);
}
-static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn)
{
struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
+ ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
+ ft_attr.level = MLX5E_TTC_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.underlay_qpn = underlay_qpn;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -848,13 +845,9 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
u8 *mc_dmac;
@@ -985,12 +978,16 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
struct mlx5e_l2_table *l2_table = &priv->fs.l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);
+ ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
+ ft_attr.level = MLX5E_L2_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1088,11 +1085,16 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);
+
+ ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
+ ft_attr.level = MLX5E_VLAN_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1145,7 +1147,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(priv, 0);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 26fc77e80f7b..85bf4a389295 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -390,7 +390,7 @@ static int validate_flow(struct mlx5e_priv *priv,
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- if (fs->ring_cookie >= priv->params.num_channels &&
+ if (fs->ring_cookie >= priv->channels.params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 15cc7b469d2e..a61b71b6fff3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,28 +31,24 @@
*/
#include <net/tc_act/tc_gact.h>
-#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
+#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
-#include "eswitch.h"
+#include "en_rep.h"
#include "vxlan.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
- bool am_enabled;
};
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
- u16 max_inline;
- u8 min_inline_mode;
- enum mlx5e_sq_type type;
};
struct mlx5e_cq_param {
@@ -79,49 +75,47 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u8 rq_type)
{
- priv->params.rq_wq_type = rq_type;
- priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
- switch (priv->params.rq_wq_type) {
+ params->rq_wq_type = rq_type;
+ params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = is_kdump_kernel() ?
+ params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz =
- MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
- MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
- MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
- priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- priv->params.mpwqe_log_stride_sz;
+ params->mpwqe_log_stride_sz =
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
+ MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
+ MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+ params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ params->mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = is_kdump_kernel() ?
+ params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
/* Extra room needed for build_skb */
- priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
+ params->lro_wqe_sz -= MLX5_RX_HEADROOM +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
- mlx5_core_info(priv->mdev,
- "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(priv->params.log_rq_size),
- BIT(priv->params.mpwqe_log_stride_sz),
- MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
+ mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(params->log_rq_size),
+ BIT(params->mpwqe_log_stride_sz),
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
- !priv->xdp_prog ?
+ u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+ !params->xdp_prog ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_set_rq_type_params(priv, rq_type);
+ mlx5e_set_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -181,8 +175,10 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
int i, j;
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->params.num_channels; i++) {
- rq_stats = &priv->channel[i]->rq.stats;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
@@ -204,8 +200,8 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
- for (j = 0; j < priv->params.num_tc; j++) {
- sq_stats = &priv->channel[i]->sq[j].stats;
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
@@ -402,8 +398,10 @@ static inline int mlx5e_get_wqe_mtt_sz(void)
MLX5_UMR_MTT_ALIGNMENT);
}
-static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
- struct mlx5e_umr_wqe *wqe, u16 ix)
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_icosq *sq,
+ struct mlx5e_umr_wqe *wqe,
+ u16 ix)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
@@ -493,11 +491,10 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
kfree(rq->mpwqe.info);
}
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
u64 npages, u8 page_shift,
struct mlx5_core_mkey *umr_mkey)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
@@ -531,21 +528,20 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
return err;
}
-static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
+static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- struct mlx5e_priv *priv = rq->priv;
- u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
+ u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
- return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+ return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
-static int mlx5e_create_rq(struct mlx5e_channel *c,
- struct mlx5e_rq_param *param,
- struct mlx5e_rq *rq)
+static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq *rq)
{
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqc = param->rqc;
+ struct mlx5_core_dev *mdev = c->mdev;
+ void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
u32 frag_sz;
@@ -554,9 +550,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
- param->wq.db_numa_node = cpu_to_node(c->cpu);
+ rqp->wq.db_numa_node = cpu_to_node(c->cpu);
- err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
return err;
@@ -565,15 +561,15 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- rq->wq_type = priv->params.rq_wq_type;
+ rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
- rq->tstamp = &priv->tstamp;
+ rq->tstamp = c->tstamp;
rq->channel = c;
rq->ix = c->ix;
- rq->priv = c->priv;
+ rq->mdev = mdev;
- rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
+ rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
err = PTR_ERR(rq->xdp_prog);
rq->xdp_prog = NULL;
@@ -588,24 +584,26 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->rx_headroom = MLX5_RX_HEADROOM;
}
- switch (priv->params.rq_wq_type) {
+ switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- if (mlx5e_is_vf_vport_rep(priv)) {
- err = -EINVAL;
- goto err_rq_wq_destroy;
- }
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
- rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
+ if (!rq->handle_rx_cqe) {
+ err = -EINVAL;
+ netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
+ goto err_rq_wq_destroy;
+ }
+
+ rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
+ rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
byte_count = rq->buff.wqe_sz;
- err = mlx5e_create_rq_umr_mkey(rq);
+ err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_wq_destroy;
rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
@@ -621,18 +619,20 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
err = -ENOMEM;
goto err_rq_wq_destroy;
}
-
- if (mlx5e_is_vf_vport_rep(priv))
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
- else
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
-
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->buff.wqe_sz = (priv->params.lro_en) ?
- priv->params.lro_wqe_sz :
- MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
+ if (!rq->handle_rx_cqe) {
+ kfree(rq->dma_info);
+ err = -EINVAL;
+ netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
+ goto err_rq_wq_destroy;
+ }
+
+ rq->buff.wqe_sz = params->lro_en ?
+ params->lro_wqe_sz :
+ MLX5E_SW2HW_MTU(c->netdev->mtu);
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
@@ -656,8 +656,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = priv->params.rx_cq_period_mode;
-
+ rq->am.mode = params->rx_cq_period_mode;
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -674,7 +673,7 @@ err_rq_wq_destroy:
return err;
}
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
int i;
@@ -684,7 +683,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
- mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
kfree(rq->dma_info);
@@ -699,10 +698,10 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+static int mlx5e_create_rq(struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- struct mlx5e_priv *priv = rq->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
@@ -723,7 +722,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
- MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -742,8 +740,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int next_state)
{
struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = c->mdev;
void *in;
void *rqc;
@@ -767,7 +764,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
return err;
}
-static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -787,6 +784,35 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
+ MLX5_SET(rqc, rqc, scatter_fcs, enable);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5_core_dev *mdev = c->mdev;
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
@@ -798,25 +824,28 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
return err;
}
-static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
- mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
+ mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
+
struct mlx5_wq_ll *wq = &rq->wq;
+ u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
while (time_before(jiffies, exp_time)) {
- if (wq->cur_sz >= priv->params.min_rx_wqes)
+ if (wq->cur_sz >= min_wqes)
return 0;
msleep(20);
}
+ netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+ rq->rqn, wq->cur_sz, min_wqes);
return -ETIMEDOUT;
}
@@ -842,83 +871,128 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
- struct mlx5e_sq *sq = &c->icosq;
- u16 pi = sq->pc & sq->wq.sz_m1;
int err;
- err = mlx5e_create_rq(c, param, rq);
+ err = mlx5e_alloc_rq(c, params, param, rq);
if (err)
return err;
- err = mlx5e_enable_rq(rq, param);
+ err = mlx5e_create_rq(rq, param);
if (err)
- goto err_destroy_rq;
+ goto err_free_rq;
- set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
- goto err_disable_rq;
+ goto err_destroy_rq;
- if (param->am_enabled)
+ if (params->rx_am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- sq->db.ico_wqe[pi].num_wqebbs = 1;
- mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
-
return 0;
-err_disable_rq:
- clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- mlx5e_disable_rq(rq);
err_destroy_rq:
mlx5e_destroy_rq(rq);
+err_free_rq:
+ mlx5e_free_rq(rq);
return err;
}
-static void mlx5e_close_rq(struct mlx5e_rq *rq)
+static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *sq = &rq->channel->icosq;
+ u16 pi = sq->pc & sq->wq.sz_m1;
+ struct mlx5e_tx_wqe *nopwqe;
+
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
+ nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+}
+
+static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- cancel_work_sync(&rq->am.work);
+}
- mlx5e_disable_rq(rq);
- mlx5e_free_rx_descs(rq);
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ cancel_work_sync(&rq->am.work);
mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+ mlx5e_free_rq(rq);
}
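Taken together, the renamed helpers split RQ setup into alloc/create/activate with teardown as the mirror image. A lifecycle sketch using only the functions in this hunk (error handling elided):

/* Setup: alloc SW state, create the HW object, move it RST -> RDY. */
err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);

/* Start RX: set ENABLED and post a NOP on the ICOSQ so
 * mlx5e_post_rx_wqes() gets kicked.
 */
mlx5e_activate_rq(&c->rq);

/* Teardown in reverse: quiesce NAPI first, then destroy and free. */
mlx5e_deactivate_rq(&c->rq);
mlx5e_close_rq(&c->rq);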
-static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
+static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kfree(sq->db.xdp.di);
- kfree(sq->db.xdp.wqe_info);
+ kfree(sq->db.di);
}
-static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+ sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
GFP_KERNEL, numa);
- sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
- if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
- mlx5e_free_sq_xdp_db(sq);
+ if (!sq->db.di) {
+ mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
}
return 0;
}
-static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_xdpsq *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+ int err;
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->min_inline_mode = params->tx_min_inline_mode;
+
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+
+ err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
+{
+ mlx5e_free_xdpsq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
kfree(sq->db.ico_wqe);
}
-static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
@@ -930,155 +1004,128 @@ static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
return 0;
}
-static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_icosq *sq)
{
- kfree(sq->db.txq.wqe_info);
- kfree(sq->db.txq.dma_fifo);
- kfree(sq->db.txq.skb);
-}
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+ int err;
-static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
-{
- int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
- sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
- GFP_KERNEL, numa);
- sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
- GFP_KERNEL, numa);
- sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
- GFP_KERNEL, numa);
- if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
- mlx5e_free_sq_txq_db(sq);
- return -ENOMEM;
- }
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- sq->dma_fifo_mask = df_sz - 1;
+ err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+ return err;
}
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
- switch (sq->type) {
- case MLX5E_SQ_TXQ:
- mlx5e_free_sq_txq_db(sq);
- break;
- case MLX5E_SQ_ICO:
- mlx5e_free_sq_ico_db(sq);
- break;
- case MLX5E_SQ_XDP:
- mlx5e_free_sq_xdp_db(sq);
- break;
- }
+ mlx5e_free_icosq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
}
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
- switch (sq->type) {
- case MLX5E_SQ_TXQ:
- return mlx5e_alloc_sq_txq_db(sq, numa);
- case MLX5E_SQ_ICO:
- return mlx5e_alloc_sq_ico_db(sq, numa);
- case MLX5E_SQ_XDP:
- return mlx5e_alloc_sq_xdp_db(sq, numa);
- }
-
- return 0;
+ kfree(sq->db.wqe_info);
+ kfree(sq->db.dma_fifo);
}
-static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
- switch (sq_type) {
- case MLX5E_SQ_ICO:
- return MLX5E_ICOSQ_MAX_WQEBBS;
- case MLX5E_SQ_XDP:
- return MLX5E_XDP_TX_WQEBBS;
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
+ GFP_KERNEL, numa);
+ sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
+ GFP_KERNEL, numa);
+ if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+ mlx5e_free_txqsq_db(sq);
+ return -ENOMEM;
}
- return MLX5_SEND_WQE_MAX_WQEBBS;
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
}
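The mask works because df_sz is a power of two (a power-of-two ring size scaled by MLX5_SEND_WQEBB_NUM_DS), so fifo indexing is a single AND rather than a modulo. A sketch of how the datapath side can index it; the accessor shown mirrors what the TX path does and is illustrative here:

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	/* i & (df_sz - 1) == i % df_sz for power-of-two df_sz */
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}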
-static int mlx5e_create_sq(struct mlx5e_channel *c,
- int tc,
- struct mlx5e_sq_param *param,
- struct mlx5e_sq *sq)
+static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
+ int txq_ix,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq)
{
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- void *sqc = param->sqc;
- void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
int err;
- sq->type = param->type;
sq->pdev = c->pdev;
- sq->tstamp = &priv->tstamp;
+ sq->tstamp = c->tstamp;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->tc = tc;
-
- err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
- if (err)
- return err;
+ sq->txq_ix = txq_ix;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->max_inline = params->tx_max_inline;
+ sq->min_inline_mode = params->tx_min_inline_mode;
- sq->uar_map = sq->bfreg.map;
param->wq.db_numa_node = cpu_to_node(c->cpu);
-
- err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
- &sq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
- goto err_unmap_free_uar;
-
- sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- if (sq->bfreg.wc)
- set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-
- sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
- sq->max_inline = param->max_inline;
- sq->min_inline_mode = param->min_inline_mode;
+ return err;
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
- if (sq->type == MLX5E_SQ_TXQ) {
- int txq_ix;
-
- txq_ix = c->ix + tc * priv->params.num_channels;
- sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
- priv->txq_to_sq_map[txq_ix] = sq;
- }
-
- sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
- sq->bf_budget = MLX5E_SQ_BF_BUDGET;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
return 0;
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
-err_unmap_free_uar:
- mlx5_free_bfreg(mdev, &sq->bfreg);
-
return err;
}
-static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
-
- mlx5e_free_sq_db(sq);
+ mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
- mlx5_free_bfreg(priv->mdev, &sq->bfreg);
}
-static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
-{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+struct mlx5e_create_sq_param {
+ struct mlx5_wq_ctrl *wq_ctrl;
+ u32 cqn;
+ u32 tisn;
+ u8 tis_lst_sz;
+ u8 min_inline_mode;
+};
+static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_create_sq_param *csp,
+ u32 *sqn)
+{
void *in;
void *sqc;
void *wq;
@@ -1086,7 +1133,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
int err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
- sizeof(u64) * sq->wq_ctrl.buf.npages;
+ sizeof(u64) * csp->wq_ctrl->buf.npages;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
@@ -1095,40 +1142,40 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
wq = MLX5_ADDR_OF(sqc, sqc, wq);
memcpy(sqc, param->sqc, sizeof(param->sqc));
-
- MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
- 0 : priv->tisn[sq->tc]);
- MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
+ MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
+ MLX5_SET(sqc, sqc, cqn, csp->cqn);
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
- MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
+ MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
- MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
- MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+ MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
- mlx5_fill_page_array(&sq->wq_ctrl.buf,
- (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+ mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
- err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+ err = mlx5_core_create_sq(mdev, in, inlen, sqn);
kvfree(in);
return err;
}
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
- int next_state, bool update_rl, int rl_index)
-{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+struct mlx5e_modify_sq_param {
+ int curr_state;
+ int next_state;
+ bool rl_update;
+ int rl_index;
+};
+static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+ struct mlx5e_modify_sq_param *p)
+{
void *in;
void *sqc;
int inlen;
@@ -1141,68 +1188,94 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
- MLX5_SET(modify_sq_in, in, sq_state, curr_state);
- MLX5_SET(sqc, sqc, state, next_state);
- if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+ MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
+ MLX5_SET(sqc, sqc, state, p->next_state);
+ if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
- MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
- err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+ err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_sq(mdev, sq->sqn);
- if (sq->rate_limit)
- mlx5_rl_remove_rate(mdev, sq->rate_limit);
+ mlx5_core_destroy_sq(mdev, sqn);
}
-static int mlx5e_open_sq(struct mlx5e_channel *c,
- int tc,
- struct mlx5e_sq_param *param,
- struct mlx5e_sq *sq)
+static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_create_sq_param *csp,
+ u32 *sqn)
{
+ struct mlx5e_modify_sq_param msp = {0};
int err;
- err = mlx5e_create_sq(c, tc, param, sq);
+ err = mlx5e_create_sq(mdev, param, csp, sqn);
if (err)
return err;
- err = mlx5e_enable_sq(sq, param);
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+ err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
- goto err_destroy_sq;
+ mlx5e_destroy_sq(mdev, *sqn);
- set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
- false, 0);
+ return err;
+}
+
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ struct mlx5e_txqsq *sq, u32 rate);
+
+static int mlx5e_open_txqsq(struct mlx5e_channel *c,
+ u32 tisn,
+ int txq_ix,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_create_sq_param csp = {};
+ u32 tx_rate;
+ int err;
+
+ err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
if (err)
- goto err_disable_sq;
+ return err;
- if (sq->txq) {
- netdev_tx_reset_queue(sq->txq);
- netif_tx_start_queue(sq->txq);
- }
+ csp.tisn = tisn;
+ csp.tis_lst_sz = 1;
+ csp.cqn = sq->cq.mcq.cqn;
+ csp.wq_ctrl = &sq->wq_ctrl;
+ csp.min_inline_mode = sq->min_inline_mode;
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ if (err)
+ goto err_free_txqsq;
+
+ tx_rate = c->priv->tx_rates[sq->txq_ix];
+ if (tx_rate)
+ mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
return 0;
-err_disable_sq:
+err_free_txqsq:
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- mlx5e_disable_sq(sq);
-err_destroy_sq:
- mlx5e_destroy_sq(sq);
+ mlx5e_free_txqsq(sq);
return err;
}
+static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+{
+ sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+}
+
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
__netif_tx_lock_bh(txq);
@@ -1210,43 +1283,153 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
__netif_tx_unlock_bh(txq);
}
-static void mlx5e_close_sq(struct mlx5e_sq *sq)
+static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
+ struct mlx5e_channel *c = sq->channel;
+
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* prevent netif_tx_wake_queue */
- napi_synchronize(&sq->channel->napi);
+ napi_synchronize(&c->napi);
- if (sq->txq) {
- netif_tx_disable_queue(sq->txq);
+ netif_tx_disable_queue(sq->txq);
- /* last doorbell out, godspeed .. */
- if (mlx5e_sq_has_room_for(sq, 1)) {
- sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
- mlx5e_send_nop(sq, true);
- }
+ /* last doorbell out, godspeed .. */
+ if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+ struct mlx5e_tx_wqe *nop;
+
+ sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
+ nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
}
+}
+
+static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5_core_dev *mdev = c->mdev;
- mlx5e_disable_sq(sq);
- mlx5e_free_sq_descs(sq);
- mlx5e_destroy_sq(sq);
+ mlx5e_destroy_sq(mdev, sq->sqn);
+ if (sq->rate_limit)
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
+ mlx5e_free_txqsq_descs(sq);
+ mlx5e_free_txqsq(sq);
}
-static int mlx5e_create_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_param *param,
- struct mlx5e_cq *cq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_create_sq_param csp = {};
+ int err;
+
+ err = mlx5e_alloc_icosq(c, param, sq);
+ if (err)
+ return err;
+
+ csp.cqn = sq->cq.mcq.cqn;
+ csp.wq_ctrl = &sq->wq_ctrl;
+ csp.min_inline_mode = params->tx_min_inline_mode;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ if (err)
+ goto err_free_icosq;
+
+ return 0;
+
+err_free_icosq:
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ mlx5e_free_icosq(sq);
+
+ return err;
+}
+
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ napi_synchronize(&c->napi);
+
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
+ mlx5e_free_icosq(sq);
+}
+
+static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_xdpsq *sq)
+{
+ unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ struct mlx5e_create_sq_param csp = {};
+ unsigned int inline_hdr_sz = 0;
+ int err;
+ int i;
+
+ err = mlx5e_alloc_xdpsq(c, params, param, sq);
+ if (err)
+ return err;
+
+ csp.tis_lst_sz = 1;
+ csp.tisn = c->priv->tisn[0]; /* tc = 0 */
+ csp.cqn = sq->cq.mcq.cqn;
+ csp.wq_ctrl = &sq->wq_ctrl;
+ csp.min_inline_mode = sq->min_inline_mode;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ if (err)
+ goto err_free_xdpsq;
+
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+ ds_cnt++;
+ }
+
+ /* Pre-initialize fixed WQE fields */
+ for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
+ dseg->lkey = sq->mkey_be;
+ }
+
+ return 0;
+
+err_free_xdpsq:
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ mlx5e_free_xdpsq(sq);
+
+ return err;
+}
+
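Because the loop above pre-writes the fixed ctrl/eth fields, the XDP xmit path only fills per-packet data. A worked example of the qpn_ds encoding with hypothetical values:

/* Illustration (hypothetical values): sqn = 0x12 and ds_cnt = 3
 * (ctrl + inline eth + data, i.e. inline mode != NONE):
 *
 *   cseg->qpn_ds = cpu_to_be32((0x12 << 8) | 3) == cpu_to_be32(0x1203)
 *
 * Per packet, only the data segment's addr/byte_count and the inline
 * header bytes still need to be written.
 */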
+static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ napi_synchronize(&c->napi);
+
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
+ mlx5e_free_xdpsq_descs(sq);
+ mlx5e_free_xdpsq(sq);
+}
+
+static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
{
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
unsigned int irqn;
int err;
u32 i;
- param->wq.buf_numa_node = cpu_to_node(c->cpu);
- param->wq.db_numa_node = cpu_to_node(c->cpu);
- param->eq_ix = c->ix;
-
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
@@ -1254,8 +1437,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
- cq->napi = &c->napi;
-
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1272,21 +1453,38 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
cqe->op_own = 0xf1;
}
- cq->channel = c;
- cq->priv = priv;
+ cq->mdev = mdev;
return 0;
}
-static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+static int mlx5e_alloc_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5_core_dev *mdev = c->priv->mdev;
+ int err;
+
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5e_alloc_cq_common(mdev, param, cq);
+
+ cq->napi = &c->napi;
+ cq->channel = c;
+
+ return err;
+}
+
+static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
mlx5_cqwq_destroy(&cq->wq_ctrl);
}
-static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
- struct mlx5e_priv *priv = cq->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
void *in;
@@ -1330,47 +1528,41 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
return 0;
}
-static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_priv *priv = cq->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_moder moder,
struct mlx5e_cq_param *param,
- struct mlx5e_cq *cq,
- struct mlx5e_cq_moder moderation)
+ struct mlx5e_cq *cq)
{
+ struct mlx5_core_dev *mdev = c->mdev;
int err;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
- err = mlx5e_create_cq(c, param, cq);
+ err = mlx5e_alloc_cq(c, param, cq);
if (err)
return err;
- err = mlx5e_enable_cq(cq, param);
+ err = mlx5e_create_cq(cq, param);
if (err)
- goto err_destroy_cq;
+ goto err_free_cq;
if (MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation.usec,
- moderation.pkts);
+ mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
return 0;
-err_destroy_cq:
- mlx5e_destroy_cq(cq);
+err_free_cq:
+ mlx5e_free_cq(cq);
return err;
}
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
- mlx5e_disable_cq(cq);
mlx5e_destroy_cq(cq);
+ mlx5e_free_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
@@ -1379,15 +1571,15 @@ static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
- struct mlx5e_priv *priv = c->priv;
int err;
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
- priv->params.tx_cq_moderation);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation,
+ &cparam->tx_cq, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
}
@@ -1410,13 +1602,17 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
int err;
int tc;
- for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ for (tc = 0; tc < params->num_tc; tc++) {
+ int txq_ix = c->ix + tc * params->num_channels;
+
+ err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+ params, &cparam->sq, &c->sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -1425,7 +1621,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
err_close_sqs:
for (tc--; tc >= 0; tc--)
- mlx5e_close_sq(&c->sq[tc]);
+ mlx5e_close_txqsq(&c->sq[tc]);
return err;
}
@@ -1435,23 +1631,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
int tc;
for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_close_sq(&c->sq[tc]);
-}
-
-static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
-{
- int i;
-
- for (i = 0; i < priv->profile->max_tc; i++)
- priv->channeltc_to_txq_map[ix][i] =
- ix + i * priv->params.num_channels;
+ mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
- struct mlx5e_sq *sq, u32 rate)
+ struct mlx5e_txqsq *sq, u32 rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_modify_sq_param msp = {0};
u16 rl_index = 0;
int err;
@@ -1474,8 +1662,11 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
}
}
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_RDY, true, rl_index);
+ msp.curr_state = MLX5_SQC_STATE_RDY;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+ msp.rl_index = rl_index;
+ msp.rl_update = true;
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
if (err) {
netdev_err(dev, "Failed configuring rate %u: %d\n",
rate, err);
@@ -1493,7 +1684,7 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+ struct mlx5e_txqsq *sq = priv->txq2sq[index];
int err = 0;
if (!mlx5_rl_is_supported(mdev)) {
@@ -1520,114 +1711,87 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
-{
- return is_kdump_kernel() ?
- MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
-}
-
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
+ struct mlx5e_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
- struct mlx5e_cq_moder rx_cq_profile;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
- struct mlx5e_sq *sq;
int err;
- int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
c->priv = priv;
+ c->mdev = priv->mdev;
+ c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
- c->num_tc = priv->params.num_tc;
- c->xdp = !!priv->xdp_prog;
-
- if (priv->params.rx_am_enabled)
- rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
- else
- rx_cq_profile = priv->params.rx_cq_moderation;
-
- mlx5e_build_channeltc_to_txq_map(priv, ix);
+ c->num_tc = params->num_tc;
+ c->xdp = !!params->xdp_prog;
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
+ err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
if (err)
goto err_napi_del;
- err = mlx5e_open_tx_cqs(c, cparam);
+ err = mlx5e_open_tx_cqs(c, params, cparam);
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
- rx_cq_profile);
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
if (err)
goto err_close_tx_cqs;
/* XDP SQ CQ params are same as normal TXQ sq CQ params */
- err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
- priv->params.tx_cq_moderation) : 0;
+ err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
+ &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
napi_enable(&c->napi);
- err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
if (err)
goto err_disable_napi;
- err = mlx5e_open_sqs(c, cparam);
+ err = mlx5e_open_sqs(c, params, cparam);
if (err)
goto err_close_icosq;
- for (i = 0; i < priv->params.num_tc; i++) {
- u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
-
- if (priv->tx_rates[txq_ix]) {
- sq = priv->txq_to_sq_map[txq_ix];
- mlx5e_set_sq_maxrate(priv->netdev, sq,
- priv->tx_rates[txq_ix]);
- }
- }
-
- err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
if (err)
goto err_close_sqs;
- err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
if (err)
goto err_close_xdp_sq;
- netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
err_close_xdp_sq:
if (c->xdp)
- mlx5e_close_sq(&c->xdp_sq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq);
err_close_sqs:
mlx5e_close_sqs(c);
err_close_icosq:
- mlx5e_close_sq(&c->icosq);
+ mlx5e_close_icosq(&c->icosq);
err_disable_napi:
napi_disable(&c->napi);
if (c->xdp)
- mlx5e_close_cq(&c->xdp_sq.cq);
+ mlx5e_close_cq(&c->rq.xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
@@ -1645,16 +1809,35 @@ err_napi_del:
return err;
}
+static void mlx5e_activate_channel(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_activate_txqsq(&c->sq[tc]);
+ mlx5e_activate_rq(&c->rq);
+ netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+}
+
+static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
+{
+ int tc;
+
+ mlx5e_deactivate_rq(&c->rq);
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_deactivate_txqsq(&c->sq[tc]);
+}
+
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
if (c->xdp)
- mlx5e_close_sq(&c->xdp_sq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq);
mlx5e_close_sqs(c);
- mlx5e_close_sq(&c->icosq);
+ mlx5e_close_icosq(&c->icosq);
napi_disable(&c->napi);
if (c->xdp)
- mlx5e_close_cq(&c->xdp_sq.cq);
+ mlx5e_close_cq(&c->rq.xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
@@ -1664,17 +1847,16 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- switch (priv->params.rq_wq_type) {
+ switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides,
- priv->params.mpwqe_log_num_strides - 9);
- MLX5_SET(wq, wq, log_wqe_stride_size,
- priv->params.mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
@@ -1683,14 +1865,14 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
+ MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
+ MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
-
- param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1715,17 +1897,14 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
-
- param->max_inline = priv->params.tx_max_inline;
- param->min_inline_mode = priv->params.tx_min_inline_mode;
- param->type = MLX5E_SQ_TXQ;
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1737,37 +1916,36 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
u8 log_cq_size;
- switch (priv->params.rq_wq_type) {
+ switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = priv->params.log_rq_size +
- priv->params.mpwqe_log_num_strides;
+ log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- log_cq_size = priv->params.log_rq_size;
+ log_cq_size = params->log_rq_size;
}
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
- if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+ MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
mlx5e_build_common_cq_param(priv, param);
@@ -1775,8 +1953,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_cq_param *param,
- u8 log_wq_size)
+ u8 log_wq_size,
+ struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
@@ -1788,8 +1966,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param,
- u8 log_wq_size)
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -1798,162 +1976,119 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
-
- param->type = MLX5E_SQ_ICO;
}
static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
-
- param->max_inline = priv->params.tx_max_inline;
- param->min_inline_mode = priv->params.tx_min_inline_mode;
- param->type = MLX5E_SQ_XDP;
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- mlx5e_build_rq_param(priv, &cparam->rq);
- mlx5e_build_sq_param(priv, &cparam->sq);
- mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
- mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
- mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
- mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
- mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
+ mlx5e_build_rq_param(priv, params, &cparam->rq);
+ mlx5e_build_sq_param(priv, params, &cparam->sq);
+ mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+ mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
+ mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
+ mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
-static int mlx5e_open_channels(struct mlx5e_priv *priv)
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs)
{
struct mlx5e_channel_param *cparam;
- int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
- int j;
-
- priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
- GFP_KERNEL);
- priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
- sizeof(struct mlx5e_sq *), GFP_KERNEL);
+ chs->num = chs->params.num_channels;
+ chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+ if (!chs->c || !cparam)
+ goto err_free;
- if (!priv->channel || !priv->txq_to_sq_map || !cparam)
- goto err_free_txq_to_sq_map;
-
- mlx5e_build_channel_param(priv, cparam);
-
- for (i = 0; i < nch; i++) {
- err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
- if (err)
- goto err_close_channels;
- }
-
- for (j = 0; j < nch; j++) {
- err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ mlx5e_build_channel_param(priv, &chs->params, cparam);
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
if (err)
goto err_close_channels;
}
- /* FIXME: This is a W/A for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
- */
- netif_tx_start_all_queues(priv->netdev);
-
kfree(cparam);
return 0;
err_close_channels:
for (i--; i >= 0; i--)
- mlx5e_close_channel(priv->channel[i]);
+ mlx5e_close_channel(chs->c[i]);
-err_free_txq_to_sq_map:
- kfree(priv->txq_to_sq_map);
- kfree(priv->channel);
+err_free:
+ kfree(chs->c);
kfree(cparam);
-
+ chs->num = 0;
return err;
}
-static void mlx5e_close_channels(struct mlx5e_priv *priv)
+static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
int i;
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
- */
- netif_tx_stop_all_queues(priv->netdev);
- netif_tx_disable(priv->netdev);
-
- for (i = 0; i < priv->params.num_channels; i++)
- mlx5e_close_channel(priv->channel[i]);
-
- kfree(priv->txq_to_sq_map);
- kfree(priv->channel);
+ for (i = 0; i < chs->num; i++)
+ mlx5e_activate_channel(chs->c[i]);
}
-static int mlx5e_rx_hash_fn(int hfunc)
+static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
- return (hfunc == ETH_RSS_HASH_TOP) ?
- MLX5_RX_HASH_FN_TOEPLITZ :
- MLX5_RX_HASH_FN_INVERTED_XOR8;
-}
-
-static int mlx5e_bits_invert(unsigned long a, int size)
-{
- int inv = 0;
+ int err = 0;
int i;
- for (i = 0; i < size; i++)
- inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
+ if (err)
+ break;
+ }
- return inv;
+ return err;
}
-static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
+static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
int i;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
- int ix = i;
- u32 rqn;
-
- if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
-
- ix = priv->params.indirection_rqt[ix];
- rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn;
- MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
- }
+ for (i = 0; i < chs->num; i++)
+ mlx5e_deactivate_channel(chs->c[i]);
}
-static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
- int ix)
+void mlx5e_close_channels(struct mlx5e_channels *chs)
{
- u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn;
+ int i;
- MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
+ for (i = 0; i < chs->num; i++)
+ mlx5e_close_channel(chs->c[i]);
+
+ kfree(chs->c);
+ chs->num = 0;
}
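struct mlx5e_channels now carries both the channel array and the params it was built from, so open/activate and deactivate/close form symmetric pairs. A usage sketch composed from the helpers in this hunk, mirroring how the activate/deactivate paths below drive them:

err = mlx5e_open_channels(priv, &priv->channels);	/* create HW objects */
if (err)
	return err;

mlx5e_activate_channels(&priv->channels);		/* start TX/RX */
mlx5e_wait_channels_min_rx_wqes(&priv->channels);	/* RX buffers posted */

/* ... steady state ... */

mlx5e_deactivate_channels(&priv->channels);
mlx5e_close_channels(&priv->channels);			/* destroy + free */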
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
- int ix, struct mlx5e_rqt *rqt)
+static int
+mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
int inlen;
int err;
u32 *in;
+ int i;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1965,10 +2100,8 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- if (sz > 1) /* RSS */
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- else
- mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
+ for (i = 0; i < sz; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
if (!err)
@@ -1984,11 +2117,15 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
-static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
+int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
struct mlx5e_rqt *rqt = &priv->indir_rqt;
+ int err;
- return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+ err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
+ if (err)
+ mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
+ return err;
}
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
@@ -1999,7 +2136,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
rqt = &priv->direct_tir[ix].rqt;
- err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
+ err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
if (err)
goto err_destroy_rqts;
}
@@ -2007,13 +2144,64 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
return 0;
err_destroy_rqts:
+ mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
for (ix--; ix >= 0; ix--)
mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err;
}
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+}
+
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+ return (hfunc == ETH_RSS_HASH_TOP) ?
+ MLX5_RX_HASH_FN_TOEPLITZ :
+ MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+ return inv;
+}
+
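mlx5e_bits_invert() mirror-reverses the low `size` bits, which is how XOR-hashed traffic gets spread over the indirection table below. For example:

/* mlx5e_bits_invert(0x1, 4) == 0x8 and mlx5e_bits_invert(0x6, 4) == 0x6:
 * bits b3 b2 b1 b0 map to b0 b1 b2 b3.
 */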
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
+ struct mlx5e_redirect_rqt_param rrp, void *rqtc)
+{
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ u32 rqn;
+
+ if (rrp.is_rss) {
+ int ix = i;
+
+ if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, ilog2(sz));
+
+ ix = priv->channels.params.indirection_rqt[ix];
+ rqn = rrp.rss.channels->c[ix]->rq.rqn;
+ } else {
+ rqn = rrp.rqn;
+ }
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+ }
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+ struct mlx5e_redirect_rqt_param rrp)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
@@ -2029,41 +2217,86 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- if (sz > 1) /* RSS */
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- else
- mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
-
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-
+ mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
kvfree(in);
-
return err;
}
-static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_redirect_rqt_param rrp)
+{
+ if (!rrp.is_rss)
+ return rrp.rqn;
+
+ if (ix >= rrp.rss.channels->num)
+ return priv->drop_rq.rqn;
+
+ return rrp.rss.channels->c[ix]->rq.rqn;
+}
+
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
+ struct mlx5e_redirect_rqt_param rrp)
{
u32 rqtn;
int ix;
if (priv->indir_rqt.enabled) {
+ /* RSS RQ table */
rqtn = priv->indir_rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < priv->params.num_channels; ix++) {
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ struct mlx5e_redirect_rqt_param direct_rrp = {
+ .is_rss = false,
+ {
+ .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
+ },
+ };
+
+ /* Direct RQ tables */
if (!priv->direct_tir[ix].rqt.enabled)
continue;
+
rqtn = priv->direct_tir[ix].rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+ mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}
}
-static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs)
+{
+ struct mlx5e_redirect_rqt_param rrp = {
+ .is_rss = true,
+ {
+ .rss = {
+ .channels = chs,
+ .hfunc = chs->params.rss_hfunc,
+ }
+ },
+ };
+
+ mlx5e_redirect_rqts(priv, rrp);
+}
+
+static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
+{
+ struct mlx5e_redirect_rqt_param drop_rrp = {
+ .is_rss = false,
+ {
+ .rqn = priv->drop_rq.rqn,
+ },
+ };
+
+ mlx5e_redirect_rqts(priv, drop_rrp);
+}
+
+static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
- if (!priv->params.lro_en)
+ if (!params->lro_en)
return;
#define ROUGH_MAX_L2_L3_HDR_SZ 256
@@ -2072,13 +2305,13 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
+ (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
- enum mlx5e_traffic_types tt)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+ enum mlx5e_traffic_types tt,
+ void *tirc)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
@@ -2094,16 +2327,15 @@ void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
- MLX5_SET(tirc, tirc, rx_hash_fn,
- mlx5e_rx_hash_fn(priv->params.rss_hfunc));
- if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
+ if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
void *rss_key = MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ memcpy(rss_key, params->toeplitz_hash_key, len);
}
switch (tt) {
@@ -2208,7 +2440,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- mlx5e_build_tir_ctx_lro(tirc, priv);
+ mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
@@ -2258,9 +2490,9 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net_device *netdev = priv->netdev;
u16 mtu;
int err;
@@ -2280,8 +2512,8 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int nch = priv->params.num_channels;
- int ntc = priv->params.num_tc;
+ int nch = priv->channels.params.num_channels;
+ int ntc = priv->channels.params.num_tc;
int tc;
netdev_reset_tc(netdev);
@@ -2298,53 +2530,116 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+ int i, tc;
+
+ for (i = 0; i < priv->channels.num; i++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ c = priv->channels.c[i];
+ for (tc = 0; tc < c->num_tc; tc++) {
+ sq = &c->sq[tc];
+ priv->txq2sq[sq->txq_ix] = sq;
+ }
+ }
+}
+
+static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev)
+{
+ return (MLX5_CAP_GEN(mdev, vport_group_manager) &&
+ MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH);
+}
+
+void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
+{
+ int num_txqs = priv->channels.num * priv->channels.params.num_tc;
+ struct net_device *netdev = priv->netdev;
+
+ mlx5e_netdev_set_tcs(netdev);
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->channels.num);
+
+ mlx5e_build_channels_tx_maps(priv);
+ mlx5e_activate_channels(&priv->channels);
+ netif_tx_start_all_queues(priv->netdev);
+
+ if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
+ mlx5e_add_sqs_fwd_rules(priv);
+
+ mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+ mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
+}
+
+void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_redirect_rqts_to_drop(priv);
+
+ if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
+ /* FIXME: This is a W/A only for tx timeout watchdog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+ mlx5e_deactivate_channels(&priv->channels);
+}
+
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify)
+{
+ struct net_device *netdev = priv->netdev;
+ int new_num_txqs;
+
+ new_num_txqs = new_chs->num * new_chs->params.num_tc;
+
+ netif_carrier_off(netdev);
+
+ if (new_num_txqs < netdev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(netdev, new_num_txqs);
+
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+
+ priv->channels = *new_chs;
+
+ /* New channels are ready to roll, modify HW settings if needed */
+ if (hw_modify)
+ hw_modify(priv);
+
+ mlx5e_refresh_tirs(priv, false);
+ mlx5e_activate_priv_channels(priv);
+
+ mlx5e_update_carrier(priv);
+}
+
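mlx5e_switch_priv_channels() is the make-before-break primitive for config changes: the replacement set is fully opened before the old one is torn down, so a failed open leaves the running device untouched. A caller sketch (new_channels and the count tweak are illustrative):

struct mlx5e_channels new_channels = {};
int err;

new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;	/* the requested change */

err = mlx5e_open_channels(priv, &new_channels);
if (err)
	return err;	/* old channels still active and untouched */

mlx5e_switch_priv_channels(priv, &new_channels, NULL);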
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
- int num_txqs;
int err;
set_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_netdev_set_tcs(netdev);
-
- num_txqs = priv->params.num_channels * priv->params.num_tc;
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
-
- err = mlx5e_open_channels(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
- __func__, err);
+ err = mlx5e_open_channels(priv, &priv->channels);
+ if (err)
goto err_clear_state_opened_flag;
- }
-
- err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
- __func__, err);
- goto err_close_channels;
- }
- mlx5e_redirect_rqts(priv);
+ mlx5e_refresh_tirs(priv, false);
+ mlx5e_activate_priv_channels(priv);
mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
-#ifdef CONFIG_RFS_ACCEL
- priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
-#endif
+
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
- err = mlx5e_add_sqs_fwd_rules(priv);
- if (err)
- goto err_close_channels;
- }
return 0;
-err_close_channels:
- mlx5e_close_channels(priv);
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
@@ -2365,7 +2660,6 @@ int mlx5e_open(struct net_device *netdev)
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g RX/TX queue size change) that involves close&open failed.
@@ -2375,13 +2669,10 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5e_remove_sqs_fwd_rules(priv);
-
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
- mlx5e_redirect_rqts(priv);
- mlx5e_close_channels(priv);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
return 0;
}
@@ -2401,11 +2692,10 @@ int mlx5e_close(struct net_device *netdev)
return err;
}
-static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
- struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
int err;
@@ -2417,111 +2707,85 @@ static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
if (err)
return err;
- rq->priv = priv;
+ rq->mdev = mdev;
return 0;
}
-static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
- struct mlx5e_cq *cq,
- struct mlx5e_cq_param *param)
+static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
+ struct mlx5e_cq *cq,
+ struct mlx5e_cq_param *param)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_core_cq *mcq = &cq->mcq;
- int eqn_not_used;
- unsigned int irqn;
- int err;
-
- err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
- &cq->wq_ctrl);
- if (err)
- return err;
-
- mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
- mcq->cqe_sz = 64;
- mcq->set_ci_db = cq->wq_ctrl.db.db;
- mcq->arm_db = cq->wq_ctrl.db.db + 1;
- *mcq->set_ci_db = 0;
- *mcq->arm_db = 0;
- mcq->vector = param->eq_ix;
- mcq->comp = mlx5e_completion_event;
- mcq->event = mlx5e_cq_error_event;
- mcq->irqn = irqn;
-
- cq->priv = priv;
-
- return 0;
+ return mlx5e_alloc_cq_common(mdev, param, cq);
}
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *drop_rq)
{
- struct mlx5e_cq_param cq_param;
- struct mlx5e_rq_param rq_param;
- struct mlx5e_rq *rq = &priv->drop_rq;
- struct mlx5e_cq *cq = &priv->drop_rq.cq;
+ struct mlx5e_cq_param cq_param = {};
+ struct mlx5e_rq_param rq_param = {};
+ struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- memset(&cq_param, 0, sizeof(cq_param));
- memset(&rq_param, 0, sizeof(rq_param));
mlx5e_build_drop_rq_param(&rq_param);
- err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+ err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
return err;
- err = mlx5e_enable_cq(cq, &cq_param);
+ err = mlx5e_create_cq(cq, &cq_param);
if (err)
- goto err_destroy_cq;
+ goto err_free_cq;
- err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+ err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
if (err)
- goto err_disable_cq;
+ goto err_destroy_cq;
- err = mlx5e_enable_rq(rq, &rq_param);
+ err = mlx5e_create_rq(drop_rq, &rq_param);
if (err)
- goto err_destroy_rq;
+ goto err_free_rq;
return 0;
-err_destroy_rq:
- mlx5e_destroy_rq(&priv->drop_rq);
-
-err_disable_cq:
- mlx5e_disable_cq(&priv->drop_rq.cq);
+err_free_rq:
+ mlx5e_free_rq(drop_rq);
err_destroy_cq:
- mlx5e_destroy_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_cq(cq);
+
+err_free_cq:
+ mlx5e_free_cq(cq);
return err;
}
-static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
- mlx5e_disable_rq(&priv->drop_rq);
- mlx5e_destroy_rq(&priv->drop_rq);
- mlx5e_disable_cq(&priv->drop_rq.cq);
- mlx5e_destroy_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_rq(drop_rq);
+ mlx5e_free_rq(drop_rq);
+ mlx5e_destroy_cq(&drop_rq->cq);
+ mlx5e_free_cq(&drop_rq->cq);
}
-static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
+ u32 underlay_qpn, u32 *tisn)
{
- struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, tc << 1);
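+	/* Ethernet callers pass an underlay QPN of 0 (see mlx5e_create_tises
+	 * below); a non-zero value is meant for profiles that run over an
+	 * underlay QP, e.g. IPoIB.
+	 */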
+ MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
- return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+ return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}
-static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
- mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5_core_destroy_tis(mdev, tisn);
}
int mlx5e_create_tises(struct mlx5e_priv *priv)
@@ -2530,7 +2794,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
int tc;
for (tc = 0; tc < priv->profile->max_tc; tc++) {
- err = mlx5e_create_tis(priv, tc);
+ err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
if (err)
goto err_close_tises;
}
@@ -2539,7 +2803,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
err_close_tises:
for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv, tc);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
return err;
}
@@ -2549,34 +2813,34 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
int tc;
for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv, tc);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
- enum mlx5e_traffic_types tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types tt,
+ u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
- mlx5e_build_tir_ctx_lro(tirc, priv);
+ mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
}
-static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
- u32 rqtn)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
- mlx5e_build_tir_ctx_lro(tirc, priv);
+ mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
-static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
struct mlx5e_tir *tir;
void *tirc;
@@ -2594,7 +2858,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
memset(in, 0, inlen);
tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+ mlx5e_build_indir_tir_ctx(priv, tt, tirc);
err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_tirs;
@@ -2605,6 +2869,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
return 0;
err_destroy_tirs:
+ mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
for (tt--; tt >= 0; tt--)
mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
@@ -2632,8 +2897,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
memset(in, 0, inlen);
tir = &priv->direct_tir[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_direct_tir_ctx(priv, tirc,
- priv->direct_tir[ix].rqt.rqtn);
+ mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_ch_tirs;
@@ -2644,6 +2908,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
return 0;
err_destroy_ch_tirs:
+ mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
for (ix--; ix >= 0; ix--)
mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
@@ -2652,7 +2917,7 @@ err_destroy_ch_tirs:
return err;
}
-static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
int i;
@@ -2669,16 +2934,27 @@ void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
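+/* Apply a scatter-FCS change to the RQ of every open channel; used by the
+ * NETIF_F_RXFCS feature handler below.
+ */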
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
int err = 0;
int i;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return 0;
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
+ if (err)
+ return err;
+ }
- for (i = 0; i < priv->params.num_channels; i++) {
- err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
+ return 0;
+}
+
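+/* vsd = VLAN Stripping Disable: when set, received VLAN tags are left
+ * inline in the packet instead of being stripped by the HW.
+ */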
+static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
if (err)
return err;
}
@@ -2689,7 +2965,7 @@ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool was_opened;
+ struct mlx5e_channels new_channels = {};
int err = 0;
if (tc && tc != MLX5E_MAX_NUM_TC)
@@ -2697,17 +2973,21 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
mutex_lock(&priv->state_lock);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
+ new_channels.params = priv->channels.params;
+ new_channels.params.num_tc = tc ? tc : 1;
- priv->params.num_tc = tc ? tc : 1;
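+	/* While the channels are closed it is enough to stage the new
+	 * parameters; they take effect on the next open.
+	 */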
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+out:
mutex_unlock(&priv->state_lock);
-
return err;
}
@@ -2737,7 +3017,9 @@ mqprio:
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return mlx5e_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
}
static void
@@ -2822,26 +3104,31 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- int err;
+ struct mlx5e_channels new_channels = {};
+ int err = 0;
+ bool reset;
mutex_lock(&priv->state_lock);
- if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
- mlx5e_close_locked(priv->netdev);
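+	/* Toggling LRO forces a full channel reset only for the linked-list
+	 * RQ type, and only while the channels are open.
+	 */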
+ reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
+ reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
- priv->params.lro_en = enable;
- err = mlx5e_modify_tirs_lro(priv);
- if (err) {
- netdev_err(netdev, "lro modify failed, %d\n", err);
- priv->params.lro_en = !enable;
+ new_channels.params = priv->channels.params;
+ new_channels.params.lro_en = enable;
+
+ if (!reset) {
+ priv->channels.params = new_channels.params;
+ err = mlx5e_modify_tirs_lro(priv);
+ goto out;
}
- if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
- mlx5e_open_locked(priv->netdev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+out:
mutex_unlock(&priv->state_lock);
-
return err;
}
@@ -2878,23 +3165,44 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
-static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
mutex_lock(&priv->state_lock);
- priv->params.vlan_strip_disable = !enable;
- err = mlx5e_modify_rqs_vsd(priv, !enable);
+ priv->channels.params.scatter_fcs_en = enable;
+ err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
if (err)
- priv->params.vlan_strip_disable = enable;
+ priv->channels.params.scatter_fcs_en = !enable;
mutex_unlock(&priv->state_lock);
return err;
}
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->channels.params.vlan_strip_disable = !enable;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
+ if (err)
+ priv->channels.params.vlan_strip_disable = enable;
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -2947,6 +3255,8 @@ static int mlx5e_set_features(struct net_device *netdev,
set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
+ set_feature_rx_fcs);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
@@ -2960,28 +3270,38 @@ static int mlx5e_set_features(struct net_device *netdev,
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool was_opened;
+ struct mlx5e_channels new_channels = {};
+ int curr_mtu;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
- reset = !priv->params.lro_en &&
- (priv->params.rq_wq_type !=
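+	/* With LRO on or a striding RQ, the RQ buffer layout does not depend
+	 * on the MTU, so the channels can stay up across the change.
+	 */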
+ reset = !priv->channels.params.lro_en &&
+ (priv->channels.params.rq_wq_type !=
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened && reset)
- mlx5e_close_locked(netdev);
+ reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
+ curr_mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mlx5e_set_dev_port_mtu(netdev);
- if (was_opened && reset)
- err = mlx5e_open_locked(netdev);
+ if (!reset) {
+ mlx5e_set_dev_port_mtu(priv);
+ goto out;
+ }
- mutex_unlock(&priv->state_lock);
+ new_channels.params = priv->channels.params;
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err) {
+ netdev->mtu = curr_mtu;
+ goto out;
+ }
+
+ mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+out:
+ mutex_unlock(&priv->state_lock);
return err;
}
@@ -3186,8 +3506,8 @@ static void mlx5e_tx_timeout(struct net_device *dev)
netdev_err(dev, "TX timeout detected\n");
- for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
- struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+ for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ struct mlx5e_txqsq *sq = priv->txq2sq[i];
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
@@ -3219,7 +3539,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
/* no need for full reset when exchanging programs */
- reset = (!priv->xdp_prog || !prog);
+ reset = (!priv->channels.params.xdp_prog || !prog);
if (was_opened && reset)
mlx5e_close_locked(netdev);
@@ -3227,7 +3547,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* num_channels is invariant here, so we can take the
* batched reference right upfront.
*/
- prog = bpf_prog_add(prog, priv->params.num_channels);
+ prog = bpf_prog_add(prog, priv->channels.num);
if (IS_ERR(prog)) {
err = PTR_ERR(prog);
goto unlock;
@@ -3237,12 +3557,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* exchange programs, extra prog reference we got from caller
* as long as we don't fail from this point onwards.
*/
- old_prog = xchg(&priv->xdp_prog, prog);
+ old_prog = xchg(&priv->channels.params.xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_priv_params(priv);
+ mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
if (was_opened && reset)
mlx5e_open_locked(netdev);
@@ -3253,8 +3573,8 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* exchanging programs w/o reset, we update ref counts on behalf
* of the channels RQs here.
*/
- for (i = 0; i < priv->params.num_channels; i++) {
- struct mlx5e_channel *c = priv->channel[i];
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
napi_synchronize(&c->napi);
@@ -3280,7 +3600,7 @@ static bool mlx5e_xdp_attached(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return !!priv->xdp_prog;
+ return !!priv->channels.params.xdp_prog;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3303,10 +3623,12 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static void mlx5e_netpoll(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *chs = &priv->channels;
+
int i;
- for (i = 0; i < priv->params.num_channels; i++)
- napi_schedule(&priv->channel[i]->napi);
+ for (i = 0; i < chs->num; i++)
+ napi_schedule(&chs->c[i]->napi);
}
#endif
@@ -3463,6 +3785,12 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw < 40000) && (pci_bw < link_speed));
}
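+/* Default HW LRO to on unless the PCI link is both slow (pci_bw <= 16000)
+ * and the bottleneck relative to the port speed.
+ */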
+static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
+{
+ return !(link_speed && pci_bw &&
+ (pci_bw <= 16000) && (pci_bw < link_speed));
+}
+
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
params->rx_cq_period_mode = cq_period_mode;
@@ -3475,6 +3803,13 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
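+	/* With adaptive moderation enabled, start from its default profile
+	 * for the chosen CQ period mode rather than the static defaults.
+	 */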
+ if (params->rx_am_enabled)
+ params->rx_cq_moderation =
+ mlx5e_am_get_def_profile(params->rx_cq_period_mode);
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -3489,75 +3824,81 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 max_channels)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 cq_period_mode = 0;
u32 link_speed = 0;
u32 pci_bw = 0;
- u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
+ params->num_channels = max_channels;
+ params->num_tc = 1;
- priv->params.lro_timeout =
- mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
- priv->params.log_sq_size = is_kdump_kernel() ?
+ /* SQ */
+ params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
- priv->params.rx_cqe_compress_def = false;
+ params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
- MLX5_CAP_GEN(mdev, vport_group_manager)) {
- mlx5e_get_max_linkspeed(mdev, &link_speed);
- mlx5e_get_pci_bw(mdev, &pci_bw);
- mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
- priv->params.rx_cqe_compress_def =
- cqe_compress_heuristic(link_speed, pci_bw);
- }
-
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS,
- priv->params.rx_cqe_compress_def);
-
- mlx5e_set_rq_priv_params(priv);
- if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- priv->params.lro_en = true;
-
- priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
- priv->params.tx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- priv->params.tx_cq_moderation.pkts =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
- if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ MLX5_CAP_GEN(mdev, vport_group_manager))
+ params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+
+ /* RQ */
+ mlx5e_set_rq_params(mdev, params);
+
+ /* HW LRO */
+ /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
+ params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
+ /* CQ moderation params */
+ cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+
+ params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+
+ /* TX inline */
+ params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+ if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+ params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
- priv->params.num_tc = 1;
- priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
+ /* RSS */
+ params->rss_hfunc = ETH_RSS_HASH_XOR;
+ netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
+ mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, max_channels);
+}
- netdev_rss_key_fill(priv->params.toeplitz_hash_key,
- sizeof(priv->params.toeplitz_hash_key));
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
- /* Initialize pflags */
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mutex_init(&priv->state_lock);
@@ -3642,13 +3983,19 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (fcs_supported)
netdev->hw_features |= NETIF_F_RXALL;
+ if (MLX5_CAP_ETH(mdev, scatter_fcs))
+ netdev->hw_features |= NETIF_F_RXFCS;
+
netdev->features = netdev->hw_features;
- if (!priv->params.lro_en)
+ if (!priv->channels.params.lro_en)
netdev->features &= ~NETIF_F_LRO;
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
+ if (!priv->channels.params.scatter_fcs_en)
+ netdev->features &= ~NETIF_F_RXFCS;
+
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
@@ -3708,39 +4055,30 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_vxlan_cleanup(priv);
- if (priv->xdp_prog)
- bpf_prog_put(priv->xdp_prog);
+ if (priv->channels.params.xdp_prog)
+ bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- int i;
- err = mlx5e_create_indirect_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
+ err = mlx5e_create_indirect_rqt(priv);
+ if (err)
return err;
- }
err = mlx5e_create_direct_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ if (err)
goto err_destroy_indirect_rqts;
- }
err = mlx5e_create_indirect_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
+ if (err)
goto err_destroy_direct_rqts;
- }
err = mlx5e_create_direct_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ if (err)
goto err_destroy_indirect_tirs;
- }
err = mlx5e_create_flow_steering(priv);
if (err) {
@@ -3761,8 +4099,7 @@ err_destroy_direct_tirs:
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- for (i = 0; i < priv->profile->max_nch(mdev); i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
return err;
@@ -3770,14 +4107,11 @@ err_destroy_indirect_rqts:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
- int i;
-
mlx5e_tc_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}
@@ -3801,21 +4135,22 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep rep;
+ u16 max_mtu;
+
+ mlx5e_init_l2_addr(priv);
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
- if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
- mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
- rep.load = mlx5e_nic_rep_load;
- rep.unload = mlx5e_nic_rep_unload;
- rep.vport = FDB_UPLINK_VPORT;
- rep.netdev = netdev;
- mlx5_eswitch_register_vport_rep(esw, 0, &rep);
- }
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5e_register_vport_reps(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -3828,16 +4163,29 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
}
queue_work(priv->wq, &priv->set_rx_mode_work);
+
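+	/* Counterpart of the detach done in mlx5e_nic_disable(): reopen and
+	 * reattach the netdev under RTNL now that the HW contexts are ready.
+	 */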
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ rtnl_lock();
+ if (netif_running(priv->netdev))
+ mlx5e_close(priv->netdev);
+ netif_device_detach(priv->netdev);
+ rtnl_unlock();
queue_work(priv->wq, &priv->set_rx_mode_work);
+
if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5_eswitch_unregister_vport_rep(esw, 0);
+ mlx5e_unregister_vport_reps(priv);
+
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
}
@@ -3853,9 +4201,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.disable = mlx5e_nic_disable,
.update_stats = mlx5e_update_stats,
.max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
};
+/* mlx5e generic netdev management API (move to en_common.c) */
+
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
void *ppriv)
@@ -3872,6 +4224,10 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
+#ifdef CONFIG_RFS_ACCEL
+ netdev->rx_cpu_rmap = mdev->rmap;
+#endif
+
profile->init(mdev, netdev, profile, ppriv);
netif_carrier_off(netdev);
@@ -3891,14 +4247,12 @@ err_cleanup_nic:
return NULL;
}
-int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
const struct mlx5e_profile *profile;
- struct mlx5e_priv *priv;
- u16 max_mtu;
int err;
- priv = netdev_priv(netdev);
profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -3906,7 +4260,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
if (err)
goto out;
- err = mlx5e_open_drop_rq(priv);
+ err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
goto err_cleanup_tx;
@@ -3918,28 +4272,13 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
mlx5e_create_q_counter(priv);
- mlx5e_init_l2_addr(priv);
-
- /* MTU range: 68 - hw-specific max */
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
-
- mlx5e_set_dev_port_mtu(netdev);
-
if (profile->enable)
profile->enable(priv);
- rtnl_lock();
- if (netif_running(netdev))
- mlx5e_open(netdev);
- netif_device_attach(netdev);
- rtnl_unlock();
-
return 0;
err_close_drop_rq:
- mlx5e_close_drop_rq(priv);
+ mlx5e_close_drop_rq(&priv->drop_rq);
err_cleanup_tx:
profile->cleanup_tx(priv);
@@ -3948,66 +4287,34 @@ out:
return err;
}
-static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
-{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- int vport;
- u8 mac[ETH_ALEN];
-
- if (!MLX5_CAP_GEN(mdev, vport_group_manager))
- return;
-
- mlx5_query_nic_vport_mac_address(mdev, 0, mac);
-
- for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep rep;
-
- rep.load = mlx5e_vport_rep_load;
- rep.unload = mlx5e_vport_rep_unload;
- rep.vport = vport;
- ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, vport, &rep);
- }
-}
-
-static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
-{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- int vport;
-
- if (!MLX5_CAP_GEN(mdev, vport_group_manager))
- return;
-
- for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport);
-}
-
-void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- rtnl_lock();
- if (netif_running(netdev))
- mlx5e_close(netdev);
- netif_device_detach(netdev);
- rtnl_unlock();
-
if (profile->disable)
profile->disable(priv);
flush_workqueue(priv->wq);
mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
- mlx5e_close_drop_rq(priv);
+ mlx5e_close_drop_rq(&priv->drop_rq);
profile->cleanup_tx(priv);
cancel_delayed_work_sync(&priv->update_stats_work);
}
+void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
+{
+ const struct mlx5e_profile *profile = priv->profile;
+ struct net_device *netdev = priv->netdev;
+
+ destroy_workqueue(priv->wq);
+ if (profile->cleanup)
+ profile->cleanup(priv);
+ free_netdev(netdev);
+}
+
/* The scope of mlx5e_attach and mlx5e_detach should be limited to creating
 * and destroying hardware contexts and connecting them to the current netdev.
 */
@@ -4024,13 +4331,12 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
if (err)
return err;
- err = mlx5e_attach_netdev(mdev, netdev);
+ err = mlx5e_attach_netdev(priv);
if (err) {
mlx5e_destroy_mdev_resources(mdev);
return err;
}
- mlx5e_register_vport_rep(mdev);
return 0;
}
@@ -4042,8 +4348,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
if (!netif_device_present(netdev))
return;
- mlx5e_unregister_vport_rep(mdev);
- mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
}
@@ -4051,7 +4356,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- void *ppriv = NULL;
+ struct mlx5e_rep_priv *rpriv = NULL;
void *priv;
int vport;
int err;
@@ -4061,10 +4366,17 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
if (err)
return NULL;
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- ppriv = &esw->offloads.vport_reps[0];
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ if (!rpriv) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, Failed to alloc rep priv data\n");
+ return NULL;
+ }
+ rpriv->rep = &esw->offloads.vport_reps[0];
+ }
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
goto err_unregister_reps;
@@ -4090,33 +4402,25 @@ err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
- mlx5e_destroy_netdev(mdev, priv);
+ mlx5e_destroy_netdev(priv);
err_unregister_reps:
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ kfree(rpriv);
return NULL;
}
-void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
-{
- const struct mlx5e_profile *profile = priv->profile;
- struct net_device *netdev = priv->netdev;
-
- destroy_workqueue(priv->wq);
- if (profile->cleanup)
- profile->cleanup(priv);
- free_netdev(netdev);
-}
-
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
+ void *ppriv = priv->ppriv;
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
- mlx5e_destroy_netdev(mdev, priv);
+ mlx5e_destroy_netdev(priv);
+ kfree(ppriv);
}
static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f621373bd7a5..79462c0368a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -34,10 +34,14 @@
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
+#include <net/netevent.h>
+#include <net/arp.h>
#include "eswitch.h"
#include "en.h"
+#include "en_rep.h"
#include "en_tc.h"
+#include "fs_core.h"
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -75,7 +79,8 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct rtnl_link_stats64 *vport_stats;
struct ifla_vf_stats vf_stats;
int err;
@@ -102,14 +107,16 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
int i, j;
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->params.num_channels; i++) {
- rq_stats = &priv->channel[i]->rq.stats;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- for (j = 0; j < priv->params.num_tc; j++) {
- sq_stats = &priv->channel[i]->sq[j].stats;
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
@@ -163,7 +170,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (esw->mode == SRIOV_NONE)
@@ -182,66 +190,426 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
}
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
-
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5e_channel *c;
- int n, tc, err, num_sqs = 0;
+ int n, tc, num_sqs = 0;
+ int err = -ENOMEM;
u16 *sqs;
- sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+ sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
if (!sqs)
- return -ENOMEM;
+ goto out;
- for (n = 0; n < priv->params.num_channels; n++) {
- c = priv->channel[n];
+ for (n = 0; n < priv->channels.num; n++) {
+ c = priv->channels.c[n];
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
}
err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
-
kfree(sqs);
+
+out:
+ if (err)
+		netdev_warn(priv->netdev, "Failed to add SQs FWD rules, %d\n", err);
return err;
}
-int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+}
+
+static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
+ DELAY_PROBE_TIME);
+#else
+ unsigned long ipv6_interval = ~0UL;
+#endif
+ unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
+ DELAY_PROBE_TIME);
+ struct net_device *netdev = rpriv->rep->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- return mlx5e_add_sqs_fwd_rules(priv);
- return 0;
+ rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- mlx5_eswitch_sqs2vport_stop(esw, rep);
+ mlx5_fc_queue_stats_work(priv->mdev,
+ &neigh_update->neigh_stats_work,
+ neigh_update->min_interval);
}
-void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
+ neigh_update.neigh_stats_work.work);
+ struct net_device *netdev = rpriv->rep->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_remove_sqs_fwd_rules(priv);
+ rtnl_lock();
+ if (!list_empty(&rpriv->neigh_update.neigh_list))
+ mlx5e_rep_queue_neigh_stats_work(priv);
- /* clean (and re-init) existing uplink offloaded TC rules */
- mlx5e_tc_cleanup(priv);
- mlx5e_tc_init(priv);
+ list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
+ mlx5e_tc_update_neigh_used_value(nhe);
+
+ rtnl_unlock();
+}
+
+static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ refcount_inc(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt))
+ kfree(nhe);
+}
+
+static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ bool neigh_connected,
+ unsigned char ha[ETH_ALEN])
+{
+ struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+
+ ASSERT_RTNL();
+
+ if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
+ !ether_addr_equal(e->h_dest, ha))
+ mlx5e_tc_encap_flows_del(priv, e);
+
+ if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ ether_addr_copy(e->h_dest, ha);
+ ether_addr_copy(eth->h_dest, ha);
+
+ mlx5e_tc_encap_flows_add(priv, e);
+ }
+}
+
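+/* Runs from the representor workqueue in process context; takes RTNL so
+ * encap flow updates cannot race with other RTNL users.
+ */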
+static void mlx5e_rep_neigh_update(struct work_struct *work)
+{
+ struct mlx5e_neigh_hash_entry *nhe =
+ container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
+ struct neighbour *n = nhe->n;
+ struct mlx5e_encap_entry *e;
+ unsigned char ha[ETH_ALEN];
+ struct mlx5e_priv *priv;
+ bool neigh_connected;
+ bool encap_connected;
+ u8 nud_state, dead;
+
+ rtnl_lock();
+
+ /* If these parameters are changed after we release the lock,
+ * we'll receive another event letting us know about it.
+ * We use this lock to avoid inconsistency between the neigh validity
+	 * and its hw address.
+ */
+ read_lock_bh(&n->lock);
+ memcpy(ha, n->ha, ETH_ALEN);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+
+ neigh_connected = (nud_state & NUD_VALID) && !dead;
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ priv = netdev_priv(e->out_dev);
+
+ if (encap_connected != neigh_connected ||
+ !ether_addr_equal(e->h_dest, ha))
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ }
+ mlx5e_rep_neigh_entry_release(nhe);
+ rtnl_unlock();
+ neigh_release(n);
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh);
+
+static int mlx5e_rep_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+ neigh_update.netevent_nb);
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct net_device *netdev = rpriv->rep->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
+ struct mlx5e_neigh m_neigh = {};
+ struct neigh_parms *p;
+ struct neighbour *n;
+ bool found = false;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+#else
+ if (n->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ m_neigh.dev = n->dev;
+ m_neigh.family = n->ops->family;
+ memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+
+ /* We are in atomic context and can't take RTNL mutex, so use
+	 * spin_lock_bh to look up the neigh table. bh is used since
+ * netevent can be called from a softirq context.
+ */
+ spin_lock_bh(&neigh_update->encap_lock);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
+ if (!nhe) {
+ spin_unlock_bh(&neigh_update->encap_lock);
+ return NOTIFY_DONE;
+ }
+
+		/* This assignment is valid as long as the neigh reference
+ * is taken
+ */
+ nhe->n = n;
+
+ /* Take a reference to ensure the neighbour and mlx5 encap
+ * entry won't be destructed until we drop the reference in
+ * delayed work.
+ */
+ neigh_hold(n);
+ mlx5e_rep_neigh_entry_hold(nhe);
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+ spin_unlock_bh(&neigh_update->encap_lock);
+ break;
+
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+		/* We check that the device is present since we don't care about
+		 * changes in the default table; we only care about per-device
+		 * changes to the delay probe time parameter.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+#else
+ if (!p->dev || p->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ /* We are in atomic context and can't take RTNL mutex,
+ * so use spin_lock_bh to walk the neigh list and look for
+ * the relevant device. bh is used since netevent can be
+ * called from a softirq context.
+ */
+ spin_lock_bh(&neigh_update->encap_lock);
+ list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
+ if (p->dev == nhe->m_neigh.dev) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&neigh_update->encap_lock);
+ if (!found)
+ return NOTIFY_DONE;
+
+ neigh_update->min_interval = min_t(unsigned long,
+ NEIGH_VAR(p, DELAY_PROBE_TIME),
+ neigh_update->min_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev,
+ neigh_update->min_interval);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
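+/* Neigh entries are hashed by the (device, family, destination IP) tuple
+ * carried in struct mlx5e_neigh (the m_neigh member).
+ */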
+static const struct rhashtable_params mlx5e_neigh_ht_params = {
+ .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
+ .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
+ .key_len = sizeof(struct mlx5e_neigh),
+ .automatic_shrinking = true,
+};
+
+static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ int err;
+
+ err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&neigh_update->neigh_list);
+ spin_lock_init(&neigh_update->encap_lock);
+ INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
+ mlx5e_rep_neigh_stats_work);
+ mlx5e_rep_neigh_update_init_interval(rpriv);
+
+ rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
+ err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
+ if (err)
+ goto out_err;
+ return 0;
+
+out_err:
+ rhashtable_destroy(&neigh_update->neigh_ht);
+ return err;
+}
+
+static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+
+ unregister_netevent_notifier(&neigh_update->netevent_nb);
+
+	flush_workqueue(priv->wq); /* flush neigh update work items */
+
+ cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+
+ rhashtable_destroy(&neigh_update->neigh_ht);
+}
+
+static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ int err;
+
+ err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+
+ return err;
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ spin_lock_bh(&rpriv->neigh_update.encap_lock);
+
+ list_del(&nhe->neigh_list);
+
+ rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ spin_unlock_bh(&rpriv->neigh_update.encap_lock);
+}
+
+/* This function must only be called under RTNL lock or under the
+ * representor's encap_lock in case RTNL mutex can't be held.
+ */
+static struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+
+ return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+}
+
+static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh_hash_entry **nhe)
+{
+ int err;
+
+ *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
+ if (!*nhe)
+ return -ENOMEM;
+
+ memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
+ INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ INIT_LIST_HEAD(&(*nhe)->encap_list);
+ refcount_set(&(*nhe)->refcnt, 1);
+
+ err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ kfree(*nhe);
+ return err;
+}
+
+static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ /* The neigh hash entry must be removed from the hash table regardless
+ * of the reference count value, so it won't be found by the next
+ * neigh notification call. The neigh hash entry reference count is
+	 * incremented only during creation and neigh notification calls, and
+	 * protects the nhe struct from being freed.
+ */
+ mlx5e_rep_neigh_entry_remove(priv, nhe);
+ mlx5e_rep_neigh_entry_release(nhe);
+}
+
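+/* Attach an encap entry to the hash entry tracking its neighbour; the
+ * neigh hash entry is created on first use.
+ */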
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_neigh_hash_entry *nhe;
+ int err;
+
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!nhe) {
+ err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
+ if (err)
+ return err;
+ }
+ list_add(&e->encap_list, &nhe->encap_list);
+ return 0;
+}
+
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_neigh_hash_entry *nhe;
+
+ list_del(&e->encap_list);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+
+ if (list_empty(&nhe->encap_list))
+ mlx5e_rep_neigh_entry_destroy(priv, nhe);
}
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
@@ -259,7 +627,8 @@ static int mlx5e_rep_open(struct net_device *dev)
static int mlx5e_rep_close(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
@@ -271,7 +640,8 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
int ret;
ret = snprintf(buf, len, "%d", rep->vport - 1);
@@ -314,18 +684,25 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep;
- if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return false;
+
+ rep = rpriv->rep;
+ if (esw->mode == SRIOV_OFFLOADS &&
+ rep && rep->vport == FDB_UPLINK_VPORT)
return true;
return false;
}
-bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
if (rep && rep->vport != FDB_UPLINK_VPORT)
return true;
@@ -397,42 +774,23 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_offload_stats = mlx5e_get_offload_stats,
};
-static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->params.log_sq_size =
- MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
- priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
-
- priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
- priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- priv->params.num_tc = 1;
-
- priv->params.lro_wqe_sz =
- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
-
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
+ params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- mutex_init(&priv->state_lock);
+ params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+ params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ params->num_tc = 1;
+ params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -458,30 +816,39 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
void *ppriv)
{
- mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mutex_init(&priv->state_lock);
+
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ priv->channels.params.num_channels = profile->max_nch(mdev);
+ mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_flow_handle *flow_rule;
int err;
- int i;
+
+ mlx5e_init_l2_addr(priv);
err = mlx5e_create_direct_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ if (err)
return err;
- }
err = mlx5e_create_direct_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ if (err)
goto err_destroy_direct_rqts;
- }
flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
rep->vport,
@@ -503,21 +870,19 @@ err_del_flow_rule:
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
- for (i = 0; i < priv->params.num_channels; i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
return err;
}
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- int i;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
mlx5e_tc_cleanup(priv);
mlx5_del_flow_rules(rep->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
- for (i = 0; i < priv->params.num_channels; i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
}
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
@@ -546,56 +911,181 @@ static struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_stats,
.max_nch = mlx5e_get_rep_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
.max_tc = 1,
};
-int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+/* e-Switch vport representors */
+
+static int
+mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = netdev_priv(rep->netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ int err;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_add_sqs_fwd_rules(priv);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_rep_neigh_init(rpriv);
+ if (err)
+ goto err_remove_sqs;
+
+ return 0;
+
+err_remove_sqs:
+ mlx5e_remove_sqs_fwd_rules(priv);
+ return err;
+}
+
+static void
+mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_priv *priv = netdev_priv(rep->netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
+ /* clean (and re-init) existing uplink offloaded TC rules */
+ mlx5e_tc_cleanup(priv);
+ mlx5e_tc_init(priv);
+
+ mlx5e_rep_neigh_cleanup(rpriv);
+}
+
+static int
+mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
int err;
- netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ if (!rpriv)
+ return -ENOMEM;
+
+ netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
+ kfree(rpriv);
return -EINVAL;
}
rep->netdev = netdev;
+ rpriv->rep = rep;
- err = mlx5e_attach_netdev(esw->dev, netdev);
+ err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
pr_warn("Failed to attach representor netdev for vport %d\n",
rep->vport);
goto err_destroy_netdev;
}
+ err = mlx5e_rep_neigh_init(rpriv);
+ if (err) {
+ pr_warn("Failed to initialized neighbours handling for vport %d\n",
+ rep->vport);
+ goto err_detach_netdev;
+ }
+
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_detach_netdev;
+ goto err_neigh_cleanup;
}
return 0;
+err_neigh_cleanup:
+ mlx5e_rep_neigh_cleanup(rpriv);
+
err_detach_netdev:
- mlx5e_detach_netdev(esw->dev, netdev);
+ mlx5e_detach_netdev(netdev_priv(netdev));
err_destroy_netdev:
- mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
-
+ mlx5e_destroy_netdev(netdev_priv(netdev));
+ kfree(rpriv);
return err;
}
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+static void
+mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
struct net_device *netdev = rep->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ void *ppriv = priv->ppriv;
+
+ unregister_netdev(rep->netdev);
+
+ mlx5e_rep_neigh_cleanup(rpriv);
+ mlx5e_detach_netdev(priv);
+ mlx5e_destroy_netdev(priv);
+ kfree(ppriv); /* mlx5e_rep_priv */
+}
+
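+/* Register a representor for every possible VF vport (1..total_vfs - 1);
+ * vport 0 is the uplink PF, registered separately in
+ * mlx5e_register_vport_reps() below.
+ */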
+static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+ u8 mac[ETH_ALEN];
+
+ mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
+ for (vport = 1; vport < total_vfs; vport++) {
+ struct mlx5_eswitch_rep rep;
+
+ rep.load = mlx5e_vport_rep_load;
+ rep.unload = mlx5e_vport_rep_unload;
+ rep.vport = vport;
+ ether_addr_copy(rep.hw_id, mac);
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+ }
+}
+
+static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+}
+
+void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep rep;
+
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
+ rep.load = mlx5e_nic_rep_load;
+ rep.unload = mlx5e_nic_rep_unload;
+ rep.vport = FDB_UPLINK_VPORT;
+ rep.netdev = priv->netdev;
+	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport */
+
+ mlx5e_rep_register_vf_vports(priv); /* VFs vports */
+}
+
+void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
- unregister_netdev(netdev);
- mlx5e_detach_netdev(esw->dev, netdev);
- mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
+ mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
+	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
new file mode 100644
index 000000000000..a0a1a7a1d6c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5E_REP_H__
+#define __MLX5E_REP_H__
+
+#include <net/ip_tunnels.h>
+#include <linux/rhashtable.h>
+#include "eswitch.h"
+#include "en.h"
+
+struct mlx5e_neigh_update_table {
+ struct rhashtable neigh_ht;
+ /* Save the neigh hash entries in a list in addition to the hash table
+ * (neigh_ht) so the neigh entries can be iterated over easily.
+ * Used for stats query.
+ */
+ struct list_head neigh_list;
+ /* protect lookup/remove operations */
+ spinlock_t encap_lock;
+ struct notifier_block netevent_nb;
+ struct delayed_work neigh_stats_work;
+ unsigned long min_interval; /* jiffies */
+};
+
+struct mlx5e_rep_priv {
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5e_neigh_update_table neigh_update;
+};
+
+struct mlx5e_neigh {
+ struct net_device *dev;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip;
+ int family;
+};
+
+struct mlx5e_neigh_hash_entry {
+ struct rhash_head rhash_node;
+ struct mlx5e_neigh m_neigh;
+
+ /* Save the neigh hash entry in a list on the representor in
+ * addition to the hash table so the neighbour entries can be
+ * iterated over easily. Used for stats query.
+ */
+ struct list_head neigh_list;
+
+ /* encap list sharing the same neigh */
+ struct list_head encap_list;
+
+ /* valid only when the neigh reference is taken during
+ * neigh_update_work workqueue callback.
+ */
+ struct neighbour *n;
+ struct work_struct neigh_update_work;
+
+ /* A neigh hash entry can be deleted only when its refcount is zero.
+ * The refcount is needed to avoid removal of the neigh hash entry by
+ * TC while it is in use by the neigh notification call.
+ */
+ refcount_t refcnt;
+
+ /* Save the last time offloaded traffic was reported to pass over one
+ * of the neigh hash entry flows. Use it to periodically update the
+ * neigh 'used' value and avoid neigh deletion by the kernel.
+ */
+ unsigned long reported_lastuse;
+};
+
+enum {
+ /* set when the encap entry is successfully offloaded into HW */
+ MLX5_ENCAP_ENTRY_VALID = BIT(0),
+};
+
+struct mlx5e_encap_entry {
+ /* neigh hash entry list of encaps sharing the same neigh */
+ struct list_head encap_list;
+ struct mlx5e_neigh m_neigh;
+ /* a node of the eswitch encap hash table, which keeps all the encap
+ * entries
+ */
+ struct hlist_node encap_hlist;
+ struct list_head flows;
+ u32 encap_id;
+ struct ip_tunnel_info tun_info;
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+
+ struct net_device *out_dev;
+ int tunnel_type;
+ u8 flags;
+ char *encap_header;
+ int encap_size;
+};
+
+void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
+void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp);
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+#endif /* __MLX5E_REP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bafcb349a50c..7b1566f0ae58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -39,6 +39,8 @@
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
+#include "en_rep.h"
+#include "ipoib.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -156,28 +158,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
-{
- bool was_opened;
-
- if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
- return;
-
- if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
- return;
-
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
-
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
- mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
-
- if (was_opened)
- mlx5e_open_locked(priv->netdev);
-
-}
-
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
@@ -331,7 +311,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- struct mlx5e_sq *sq = &rq->channel->icosq;
+ struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
@@ -341,7 +321,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
- mlx5e_send_nop(sq, false);
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -353,7 +333,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
sq->pc += num_wqebbs;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
@@ -637,37 +617,36 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *wqe;
- u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+ u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5e_dma_info *di,
const struct xdp_buff *xdp)
{
- struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+ struct mlx5e_xdpsq *sq = &rq->xdpsq;
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = sq->pc & wq->sz_m1;
+ u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
- u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;
ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
dma_addr_t dma_addr = di->addr + data_offset;
unsigned int dma_len = xdp->data_end - xdp->data;
+ prefetchw(wqe);
+
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
rq->stats.xdp_drop++;
@@ -675,48 +654,42 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
return false;
}
- if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
- if (sq->db.xdp.doorbell) {
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+ if (sq->db.doorbell) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
- sq->db.xdp.doorbell = false;
+ sq->db.doorbell = false;
}
rq->stats.xdp_tx_full++;
mlx5e_page_release(rq, di, true);
return false;
}
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
- memset(wqe, 0, sizeof(*wqe));
+ cseg->fm_ce_se = 0;
dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
-
- ds_cnt += MLX5E_XDP_IHS_DS_COUNT;
dseg++;
}
/* write the dma part */
dseg->addr = cpu_to_be64(dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
- dseg->lkey = sq->mkey_be;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->db.xdp.di[pi] = *di;
- wi->opcode = MLX5_OPCODE_SEND;
- wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
- sq->pc += MLX5E_XDP_TX_WQEBBS;
+ sq->db.di[pi] = *di;
+ sq->pc++;
- sq->db.xdp.doorbell = true;
+ sq->db.doorbell = true;
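+ /* defer ringing the doorbell: mlx5e_poll_rx_cq() rings it once at the
+ * end of the RX poll, batching all XDP_TX frames posted in this run
+ */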
rq->stats.xdp_tx++;
return true;
}
@@ -837,7 +810,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct net_device *netdev = rq->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
__be16 wqe_counter_be;
@@ -932,7 +906,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto mpwrq_cqe_out;
}
- prefetch(skb->data);
+ prefetchw(skb->data);
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
@@ -950,7 +924,7 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
+ struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
int work_done = 0;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -977,9 +951,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
- if (xdp_sq->db.xdp.doorbell) {
- mlx5e_xmit_xdp_doorbell(xdp_sq);
- xdp_sq->db.xdp.doorbell = false;
+ if (xdpsq->db.doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdpsq);
+ xdpsq->db.doorbell = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -989,3 +963,152 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
return work_done;
}
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_xdpsq *sq;
+ struct mlx5e_rq *rq;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ di = &sq->db.di[ci];
+
+ sqcc++;
+ /* Recycle RX page */
+ mlx5e_page_release(rq, di, true);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
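+ /* true means the full budget was consumed and more completions may
+ * still be pending on this CQ
+ */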
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ di = &sq->db.di[ci];
+ sq->cc++;
+
+ mlx5e_page_release(rq, di, false);
+ }
+}
+
+#ifdef CONFIG_MLX5_CORE_IPOIB
+
+#define MLX5_IB_GRH_DGID_OFFSET 24
+#define MLX5_IB_GRH_BYTES 40
+#define MLX5_IPOIB_ENCAP_LEN 4
+#define MLX5_GID_SIZE 16
+
+static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u8 *dgid;
+ u8 g;
+
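+ /* Classify the packet by its destination GID: no GRH or a non-multicast
+ * DGID means the packet is host-directed, the broadcast GID marks a
+ * broadcast, and anything else is multicast.
+ */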
+ g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
+ if ((!g) || dgid[0] != 0xff)
+ skb->pkt_type = PACKET_HOST;
+ else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+
+ /* TODO: IB/ipoib: Allow mcast packets from other VFs
+ * 68996a6e760e5c74654723eeb57bf65628ae87f4
+ */
+
+ skb_pull(skb, MLX5_IB_GRH_BYTES);
+
+ skb->protocol = *((__be16 *)(skb->data));
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, MLX5_IPOIB_ENCAP_LEN);
+
+ skb->dev = netdev;
+
+ rq->stats.csum_complete++;
+ rq->stats.packets++;
+ rq->stats.bytes += cqe_bcnt;
+}
+
+void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_counter_be;
+ struct sk_buff *skb;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
+ goto wq_ll_pop;
+
+ mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+#endif /* CONFIG_MLX5_CORE_IPOIB */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index cbfac06b7ffd..02dd3a95ed8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -293,7 +293,7 @@ void mlx5e_rx_am_work(struct work_struct *work)
struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
- mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
cur_profile.usec, cur_profile.pkts);
am->state = MLX5E_AM_START_MEASURE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5621dcfda4f1..5225f2226a67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -236,12 +236,9 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
{
int err = 0;
- err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
- if (err) {
- netdev_err(priv->netdev,
- "\tFailed to enable UC loopback err(%d)\n", err);
+ err = mlx5e_refresh_tirs(priv, true);
+ if (err)
return err;
- }
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@@ -258,7 +255,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
dev_remove_pack(&lbtp->pt);
- mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+ mlx5e_refresh_tirs(priv, false);
}
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 5436866798f4..11c27e4fadf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -42,14 +42,25 @@
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/vxlan.h>
+#include <net/arp.h>
#include "en.h"
+#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+struct mlx5_nic_flow_attr {
+ u32 action;
+ u32 flow_tag;
+ u32 mod_hdr_id;
+};
+
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
+ MLX5E_TC_FLOW_NIC = BIT(1),
+ MLX5E_TC_FLOW_OFFLOADED = BIT(2),
};
struct mlx5e_tc_flow {
@@ -58,7 +69,16 @@ struct mlx5e_tc_flow {
u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
- struct mlx5_esw_flow_attr *attr;
+ union {
+ struct mlx5_esw_flow_attr esw_attr[0];
+ struct mlx5_nic_flow_attr nic_attr[0];
+ };
+};
+
+struct mlx5e_tc_flow_parse_attr {
+ struct mlx5_flow_spec spec;
+ int num_mod_hdr_actions;
+ void *mod_hdr_actions;
};
enum {
@@ -71,24 +91,26 @@ enum {
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- u32 action, u32 flow_tag)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {
- .action = action,
- .flow_tag = flow_tag,
+ .action = attr->action,
+ .flow_tag = attr->flow_tag,
.encap_id = 0,
};
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
+ int err;
- if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.vlan.ft.t;
- } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
@@ -97,6 +119,19 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
dest.counter = counter;
}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr->num_mod_hdr_actions,
+ parse_attr->mod_hdr_actions,
+ &attr->mod_hdr_id);
+ flow_act.modify_id = attr->mod_hdr_id;
+ kfree(parse_attr->mod_hdr_actions);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_create_mod_hdr_id;
+ }
+ }
+
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
@@ -114,8 +149,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
table_created = true;
}
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
+ parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+ &flow_act, &dest, 1);
if (IS_ERR(rule))
goto err_add_rule;
@@ -128,6 +164,10 @@ err_add_rule:
priv->fs.tc.t = NULL;
}
err_create_ft:
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
return rule;
@@ -138,47 +178,195 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_fc *counter = NULL;
- if (!IS_ERR(flow->rule)) {
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
- mlx5_fc_destroy(priv->mdev, counter);
- }
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
+
+ if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ flow->nic_attr->mod_hdr_id);
}
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_flow_handle *rule;
int err;
err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err)
- return ERR_PTR(err);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_vlan;
+ }
- return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
-}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr->num_mod_hdr_actions,
+ parse_attr->mod_hdr_actions,
+ &attr->mod_hdr_id);
+ kfree(parse_attr->mod_hdr_actions);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_mod_hdr;
+ }
+ }
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
+ rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+ if (IS_ERR(rule))
+ goto err_add_rule;
+
+ return rule;
+
+err_add_rule:
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+err_mod_hdr:
+ mlx5_eswitch_del_vlan_action(esw, attr);
+err_add_vlan:
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ mlx5e_detach_encap(priv, flow);
+ return rule;
+}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ }
- mlx5_eswitch_del_vlan_action(esw, flow->attr);
+ mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
- if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
+ kvfree(flow->esw_attr->parse_attr);
+ }
+
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+}
+
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+ e->encap_size, e->encap_header,
+ &e->encap_id);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
+ err);
+ return;
+ }
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ list_for_each_entry(flow, &e->flows, encap) {
+ flow->esw_attr->encap_id = e->encap_id;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv,
+ flow->esw_attr->parse_attr,
+ flow);
+ if (IS_ERR(flow->rule)) {
+ err = PTR_ERR(flow->rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ continue;
+ }
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ }
+}
+
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_tc_flow *flow;
+ struct mlx5_fc *counter;
+
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
+ }
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ }
+}
+
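+/* Check whether offloaded traffic passed through any of the flows sharing
+ * this neigh entry since the last report; if so, send a neigh event so the
+ * kernel updates the neigh 'used' value instead of expiring the entry.
+ */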
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ u64 bytes, packets, lastuse = 0;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5e_encap_entry *e;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (m_neigh->family == AF_INET6)
+ tbl = ipv6_stub->nd_tbl;
+#endif
+ else
+ return;
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
+ continue;
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (neigh_used) {
+ nhe->reported_lastuse = jiffies;
+
+ /* find the relevant neigh according to the cached device and
+ * dst IP pair
+ */
+ n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
+ if (!n) {
+ WARN(1, "The neighbour was already freed\n");
+ return;
+ }
+
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -188,22 +376,20 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
list_del(&flow->encap);
if (list_empty(next)) {
- struct mlx5_encap_entry *e;
+ struct mlx5e_encap_entry *e;
+
+ e = list_entry(next, struct mlx5e_encap_entry, flows);
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
- e = list_entry(next, struct mlx5_encap_entry, flows);
- if (e->n) {
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_encap_dealloc(priv->mdev, e->encap_id);
- neigh_release(e->n);
- }
+
hlist_del_rcu(&e->encap_hlist);
+ kfree(e->encap_header);
kfree(e);
}
}
-/* we get here also when setting rule to the FW failed, etc. It means that the
- * flow rule itself might not exist, but some offloading related to the actions
- * should be cleaned.
- */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -631,16 +817,18 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep;
u8 min_inline;
int err;
err = __parse_cls_flower(priv, spec, f, &min_inline);
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
- rep->vport != FDB_UPLINK_VPORT) {
- if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < min_inline) {
+ if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
+ rep = rpriv->rep;
+ if (rep->vport != FDB_UPLINK_VPORT &&
+ (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+ esw->offloads.inline_mode < min_inline)) {
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
min_inline, esw->offloads.inline_mode);
@@ -651,29 +839,313 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
+struct pedit_headers {
+ struct ethhdr eth;
+ struct iphdr ip4;
+ struct ipv6hdr ip6;
+ struct tcphdr tcp;
+ struct udphdr udp;
+};
+
+static int pedit_header_offsets[] = {
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
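+/* return a pointer to the copy of the given protocol header (eth/ip4/ip6/
+ * tcp/udp) within struct pedit_headers
+ */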
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+ struct pedit_headers *masks,
+ struct pedit_headers *vals)
+{
+ u32 *curr_pmask, *curr_pval;
+
+ if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
+ goto out_err;
+
+ curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
+
+ if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ goto out_err;
+
+ *curr_pmask |= mask;
+ *curr_pval |= (val & mask);
+
+ return 0;
+
+out_err:
+ return -EOPNOTSUPP;
+}
+
+struct mlx5_fields {
+ u8 field;
+ u8 size;
+ u32 offset;
+};
+
+static struct mlx5_fields fields[] = {
+ {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_dest[4])},
+ {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
+ {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
+
+ {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
+ {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
+
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
+
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
+
+ {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
+ {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+};
+
+/* On input, parse_attr->num_mod_hdr_actions tells the maximum number of HW
+ * actions that can be parsed from the SW pedit action. On success, it holds
+ * the number of HW actions that were actually parsed.
+ */
+static int offload_pedit_fields(struct pedit_headers *masks,
+ struct pedit_headers *vals,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ int i, action_size, nactions, max_actions, first, last;
+ void *s_masks_p, *a_masks_p, *vals_p;
+ u32 s_mask, a_mask, val;
+ struct mlx5_fields *f;
+ u8 cmd, field_bsize;
+ unsigned long mask;
+ void *action;
+
+ set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
+ add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
+ set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
+ add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action = parse_attr->mod_hdr_actions;
+ max_actions = parse_attr->num_mod_hdr_actions;
+ nactions = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ /* avoid seeing bits set from previous iterations */
+ s_mask = a_mask = mask = val = 0;
+
+ s_masks_p = (void *)set_masks + f->offset;
+ a_masks_p = (void *)add_masks + f->offset;
+
+ memcpy(&s_mask, s_masks_p, f->size);
+ memcpy(&a_mask, a_masks_p, f->size);
+
+ if (!s_mask && !a_mask) /* nothing to offload here */
+ continue;
+
+ if (s_mask && a_mask) {
+ printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
+ return -EOPNOTSUPP;
+ }
+
+ if (nactions == max_actions) {
+ printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
+ return -EOPNOTSUPP;
+ }
+
+ if (s_mask) {
+ cmd = MLX5_ACTION_TYPE_SET;
+ mask = s_mask;
+ vals_p = (void *)set_vals + f->offset;
+ /* clear to denote we consumed this field */
+ memset(s_masks_p, 0, f->size);
+ } else {
+ cmd = MLX5_ACTION_TYPE_ADD;
+ mask = a_mask;
+ vals_p = (void *)add_vals + f->offset;
+ /* clear to denote we consumed this field */
+ memset(a_masks_p, 0, f->size);
+ }
+
+ memcpy(&val, vals_p, f->size);
+
+ field_bsize = f->size * BITS_PER_BYTE;
+ first = find_first_bit(&mask, field_bsize);
+ last = find_last_bit(&mask, field_bsize);
+ if (first > 0 || last != (field_bsize - 1)) {
+ printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(set_action_in, action, action_type, cmd);
+ MLX5_SET(set_action_in, action, field, f->field);
+
+ if (cmd == MLX5_ACTION_TYPE_SET) {
+ MLX5_SET(set_action_in, action, offset, 0);
+ /* length is num of bits to be written, zero means length of 32 */
+ MLX5_SET(set_action_in, action, length, field_bsize);
+ }
+
+ if (field_bsize == 32)
+ MLX5_SET(set_action_in, action, data, ntohl(val));
+ else if (field_bsize == 16)
+ MLX5_SET(set_action_in, action, data, ntohs(val));
+ else if (field_bsize == 8)
+ MLX5_SET(set_action_in, action, data, val);
+
+ action += action_size;
+ nactions++;
+ }
+
+ parse_attr->num_mod_hdr_actions = nactions;
+ return 0;
+}
+
+static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
+ const struct tc_action *a, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int nkeys, action_size, max_actions;
+
+ nkeys = tcf_pedit_nkeys(a);
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
+
+ /* a single 32-bit pedit SW key can yield up to 16 HW actions */
+ max_actions = min(max_actions, nkeys * 16);
+
+ parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
+ if (!parse_attr->mod_hdr_actions)
+ return -ENOMEM;
+
+ parse_attr->num_mod_hdr_actions = max_actions;
+ return 0;
+}
+
+static const struct pedit_headers zero_masks = {};
+
+static int parse_tc_pedit_action(struct mlx5e_priv *priv,
+ const struct tc_action *a, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
+ int nkeys, i, err = -EOPNOTSUPP;
+ u32 mask, val, offset;
+ u8 cmd, htype;
+
+ nkeys = tcf_pedit_nkeys(a);
+
+ memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+ memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ cmd = tcf_pedit_cmd(a, i);
+ err = -EOPNOTSUPP; /* can't be all optimistic */
+
+ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
+ printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+ goto out_err;
+ }
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
+ printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+ goto out_err;
+ }
+
+ mask = tcf_pedit_mask(a, i);
+ val = tcf_pedit_val(a, i);
+ offset = tcf_pedit_offset(a, i);
+
+ err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
+ if (err)
+ goto out_err;
+ }
+
+ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+ if (err)
+ goto out_err;
+
+ err = offload_pedit_fields(masks, vals, parse_attr);
+ if (err < 0)
+ goto out_dealloc_parsed_actions;
+
+ for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
+ cmd_masks = &masks[cmd];
+ if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+ printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
+ cmd);
+ print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
+ 16, 1, cmd_masks, sizeof(zero_masks), true);
+ err = -EOPNOTSUPP;
+ goto out_dealloc_parsed_actions;
+ }
+ }
+
+ return 0;
+
+out_dealloc_parsed_actions:
+ kfree(parse_attr->mod_hdr_actions);
+out_err:
+ return err;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *flow_tag)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
LIST_HEAD(actions);
+ int err;
if (tc_no_actions(exts))
return -EINVAL;
- *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- *action = 0;
+ attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ attr->action = 0;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
- if (*action)
+ if (attr->action)
return -EINVAL;
if (is_tcf_gact_shot(a)) {
- *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
- *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ continue;
+ }
+
+ if (is_tcf_pedit(a)) {
+ err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
@@ -686,8 +1158,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
- *flow_tag = mark;
- *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flow_tag = mark;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
@@ -853,16 +1325,17 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev,
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5_encap_entry *e,
- struct net_device **out_dev)
+ struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
char *encap_header;
int ttl, err;
+ u8 nud_state;
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -887,25 +1360,36 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
+ err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
&fl4, &n, &ttl);
if (err)
goto out;
- if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
- err = -EOPNOTSUPP;
+ /* used by mlx5e_detach_encap to look up the neigh hash table
+ * entry when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * the neigh validity state, so that if a notification arrives when the
+ * neigh changes its validity state, the relevant neigh is found in
+ * the hash.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
goto out;
- }
- e->n = n;
- e->out_dev = *out_dev;
-
- neigh_ha_snapshot(e->h_dest, n, *out_dev);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- gen_vxlan_header_ipv4(*out_dev, encap_header,
+ gen_vxlan_header_ipv4(out_dev, encap_header,
ipv4_encap_size, e->h_dest, ttl,
fl4.daddr,
fl4.saddr, tun_key->tp_dst,
@@ -913,31 +1397,49 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto destroy_neigh_entry;
+ }
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
+
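+ /* The neighbour is not valid yet: kick the kernel to resolve it and
+ * return -EAGAIN so the flow is kept cached and gets offloaded once
+ * a neigh update reports the entry as valid.
+ */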
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ return -EAGAIN;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
ipv4_encap_size, encap_header, &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
- if (err && n)
- neigh_release(n);
kfree(encap_header);
+ if (n)
+ neigh_release(n);
return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5_encap_entry *e,
- struct net_device **out_dev)
-
+ struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
char *encap_header;
int err, ttl = 0;
+ u8 nud_state;
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -963,25 +1465,36 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
&fl6, &n, &ttl);
if (err)
goto out;
- if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
- err = -EOPNOTSUPP;
+ /* used by mlx5e_detach_encap to look up the neigh hash table
+ * entry when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * the neigh validity state, so that if a notification arrives when the
+ * neigh changes its validity state, the relevant neigh is found in
+ * the hash.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
goto out;
- }
-
- e->n = n;
- e->out_dev = *out_dev;
- neigh_ha_snapshot(e->h_dest, n, *out_dev);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- gen_vxlan_header_ipv6(*out_dev, encap_header,
+ gen_vxlan_header_ipv6(out_dev, encap_header,
ipv6_encap_size, e->h_dest, ttl,
&fl6.daddr,
&fl6.saddr, tun_key->tp_dst,
@@ -989,31 +1502,51 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto destroy_neigh_entry;
+ }
+
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
+
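+ /* As in the IPv4 path: defer the offload until the neigh turns valid. */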
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ return -EAGAIN;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
ipv6_encap_size, encap_header, &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
- if (err && n)
- neigh_release(n);
kfree(encap_header);
+ if (n)
+ neigh_release(n);
return err;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
- struct mlx5_esw_flow_attr *attr)
+ struct net_device **encap_dev,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
unsigned short family = ip_tunnel_info_af(tun_info);
+ struct mlx5e_priv *up_priv = netdev_priv(up_dev);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
- struct mlx5_encap_entry *e;
- struct net_device *out_dev;
- int tunnel_type, err = -EOPNOTSUPP;
+ struct mlx5e_encap_entry *e;
+ int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;
@@ -1048,10 +1581,8 @@ vxlan_encap_offload_err:
}
}
- if (found) {
- attr->encap = e;
- return 0;
- }
+ if (found)
+ goto attach_flow;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
@@ -1062,16 +1593,21 @@ vxlan_encap_offload_err:
INIT_LIST_HEAD(&e->flows);
if (family == AF_INET)
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
- err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
+ err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
- if (err)
+ if (err && err != -EAGAIN)
goto out_err;
- attr->encap = e;
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+attach_flow:
+ list_add(&flow->encap, &e->flows);
+ *encap_dev = e->out_dev;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
+ attr->encap_id = e->encap_id;
+
return err;
out_err:
@@ -1080,20 +1616,22 @@ out_err:
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
const struct tc_action *a;
LIST_HEAD(actions);
bool encap = false;
- int err;
+ int err = 0;
if (tc_no_actions(exts))
return -EINVAL;
memset(attr, 0, sizeof(*attr));
- attr->in_rep = priv->ppriv;
+ attr->in_rep = rpriv->rep;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
@@ -1103,9 +1641,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
+ if (is_tcf_pedit(a)) {
+ err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ continue;
+ }
+
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out_dev;
+ struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1115,18 +1663,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
- attr->out_rep = out_priv->ppriv;
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
} else if (encap) {
err = mlx5e_attach_encap(priv, info,
- out_dev, attr);
- if (err)
+ out_dev, &encap_dev, flow);
+ if (err && err != -EAGAIN)
return err;
- list_add(&flow->encap, &attr->encap->flows);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- out_priv = netdev_priv(attr->encap->out_dev);
- attr->out_rep = out_priv->ppriv;
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
+ attr->parse_attr = parse_attr;
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@@ -1166,28 +1716,30 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
- return 0;
+ return err;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
- int err, attr_size = 0;
- u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
- struct mlx5_flow_spec *spec;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int attr_size, err = 0;
u8 flow_flags = 0;
if (esw && esw->mode == SRIOV_OFFLOADS) {
flow_flags = MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
+ } else {
+ flow_flags = MLX5E_TC_FLOW_NIC;
+ attr_size = sizeof(struct mlx5_nic_flow_attr);
}
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec || !flow) {
+ parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+ if (!parse_attr || !flow) {
err = -ENOMEM;
goto err_free;
}
@@ -1195,42 +1747,54 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
flow->cookie = f->cookie;
flow->flags = flow_flags;
- err = parse_cls_flower(priv, flow, spec, f);
+ err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
if (err < 0)
goto err_free;
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
- err = parse_tc_fdb_actions(priv, f->exts, flow);
+ err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
- goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
+ goto err_handle_encap_flow;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
- err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+ flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
}
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_del_rule;
+ goto err_free;
}
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
goto err_del_rule;
- goto out;
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
+ !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ kvfree(parse_attr);
+ return err;
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
+err_handle_encap_flow:
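+ /* -EAGAIN means the encap neigh is not resolved yet; keep the flow in
+ * the hash table so mlx5e_tc_encap_flows_add() can offload it once
+ * the neighbour becomes valid.
+ */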
+ if (err == -EAGAIN) {
+ err = rhashtable_insert_fast(&tc->ht, &flow->node,
+ tc->ht_params);
+ if (err)
+ mlx5e_tc_del_flow(priv, flow);
+ else
+ return 0;
+ }
+
err_free:
+ kvfree(parse_attr);
kfree(flow);
-out:
- kvfree(spec);
return err;
}
@@ -1249,7 +1813,6 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
mlx5e_tc_del_flow(priv, flow);
-
kfree(flow);
return 0;
@@ -1272,6 +1835,9 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
if (!flow)
return -EINVAL;
+ if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
+ return 0;
+
counter = mlx5_flow_rule_counter(flow->rule);
if (!counter)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 34bf903fc886..ecbe30d808ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -46,6 +46,15 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
+struct mlx5e_encap_entry;
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+
+struct mlx5e_neigh_hash_entry;
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
+
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
return atomic_read(&priv->fs.tc.ht.nelems);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 57f5e2d7ebd1..ab3bb026ff9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -33,34 +33,12 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
+#include "ipoib.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
- memset(cseg, 0, sizeof(*cseg));
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
-
- sq->pc++;
- sq->stats.nop++;
-
- if (notify_hw) {
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
- }
-}
-
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
struct mlx5e_sq_dma *dma)
{
@@ -76,25 +54,25 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
- sq->db.txq.dma_fifo[i].addr = addr;
- sq->db.txq.dma_fifo[i].size = size;
- sq->db.txq.dma_fifo[i].type = map_type;
+ sq->db.dma_fifo[i].addr = addr;
+ sq->db.dma_fifo[i].size = size;
+ sq->db.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
- return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
int i;
@@ -111,6 +89,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
+ u16 num_channels;
int up = 0;
if (!netdev_get_num_tc(dev))
@@ -122,11 +101,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
*/
- if (channel_ix >= priv->params.num_channels)
- channel_ix = reciprocal_scale(channel_ix,
- priv->params.num_channels);
+ num_channels = priv->channels.params.num_channels;
+ if (channel_ix >= num_channels)
+ channel_ix = reciprocal_scale(channel_ix, num_channels);
- return priv->channeltc_to_txq_map[channel_ix][up];
+ return priv->channel_tc2txq[channel_ix][up];
}
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
@@ -175,25 +154,6 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
}
}
-static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
- struct sk_buff *skb, bool bf)
-{
- /* Some NIC TX decisions, e.g loopback, are based on the packet
- * headers and occur before the data gather.
- * Therefore these headers must be copied into the WQE
- */
- if (bf) {
- u16 ihs = skb_headlen(skb);
-
- if (skb_vlan_tag_present(skb))
- ihs += VLAN_HLEN;
-
- if (ihs <= sq->max_inline)
- return skb_headlen(skb);
- }
- return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
-}
-
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
unsigned int *skb_len,
unsigned int len)
@@ -218,31 +178,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+static inline void
+mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
-
- unsigned char *skb_data = skb->data;
- unsigned int skb_len = skb->len;
- u8 opcode = MLX5_OPCODE_SEND;
- dma_addr_t dma_addr = 0;
- unsigned int num_bytes;
- bool bf = false;
- u16 headlen;
- u16 ds_cnt;
- u16 ihs;
- int i;
-
- memset(wqe, 0, sizeof(*wqe));
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -254,74 +192,51 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
} else
sq->stats.csum_none++;
+}
- if (sq->cc != sq->prev_cc) {
- sq->prev_cc = sq->cc;
- sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
- }
-
- if (skb_is_gso(skb)) {
- eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- opcode = MLX5_OPCODE_LSO;
+static inline u16
+mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
+{
+ u16 ihs;
- if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
- sq->stats.tso_inner_packets++;
- sq->stats.tso_inner_bytes += skb->len - ihs;
- } else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
- sq->stats.tso_packets++;
- sq->stats.tso_bytes += skb->len - ihs;
- }
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- sq->stats.packets += skb_shinfo(skb)->gso_segs;
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ if (skb->encapsulation) {
+ ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ sq->stats.tso_inner_packets++;
+ sq->stats.tso_inner_bytes += skb->len - ihs;
} else {
- bf = sq->bf_budget &&
- !skb->xmit_more &&
- !skb_shinfo(skb)->nr_frags;
- ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
- sq->stats.packets++;
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- }
-
- sq->stats.bytes += num_bytes;
- wi->num_bytes = num_bytes;
-
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (ihs) {
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
- ihs += VLAN_HLEN;
- } else {
- memcpy(eseg->inline_hdr.start, skb_data, ihs);
- mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
- }
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
- } else if (skb_vlan_tag_present(skb)) {
- eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
- eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += skb->len - ihs;
}
- dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+ *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ return ihs;
+}
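/* [Editor's note] Worked example of the num_bytes formula above, not
 * part of the patch. skb->len counts the headers once, but on the wire
 * each of the gso_segs segments carries its own copy, so the bytes
 * actually sent are skb->len + (gso_segs - 1) * ihs. With ihs = 54
 * (14 B Ethernet + 20 B IPv4 + 20 B TCP), gso_size = 1448 and a
 * 14480 B payload: gso_segs = 10, skb->len = 14534, and
 * num_bytes = 14534 + 9 * 54 = 15020 = 10 * (1448 + 54).
 */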
- wi->num_dma = 0;
+static inline int
+mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ unsigned char *skb_data, u16 headlen,
+ struct mlx5_wqe_data_seg *dseg)
+{
+ dma_addr_t dma_addr = 0;
+ u8 num_dma = 0;
+ int i;
- headlen = skb_len - skb->data_len;
if (headlen) {
dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- goto dma_unmap_wqe_err;
+ return -ENOMEM;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(headlen);
mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
- wi->num_dma++;
-
+ num_dma++;
dseg++;
}
@@ -330,59 +245,120 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
- DMA_TO_DEVICE);
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- goto dma_unmap_wqe_err;
+ return -ENOMEM;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
- wi->num_dma++;
-
+ num_dma++;
dseg++;
}
- ds_cnt += wi->num_dma;
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ return num_dma;
+}
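/* [Editor's note] Not part of the patch: every mapping created in
 * mlx5e_txwqe_build_dsegs() is also recorded via mlx5e_dma_push()
 * into the power-of-two dma_fifo (indexed by dma_fifo_pc &
 * dma_fifo_mask), so both TX completion and the dma_unmap_wqe_err
 * unwind path can unmap in order without re-walking the skb and its
 * fragments.
 */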
- sq->db.txq.skb[pi] = skb;
+static inline void
+mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
+ struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi;
+ wi->num_bytes = num_bytes;
+ wi->num_dma = num_dma;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- sq->pc += wi->num_wqebbs;
+ wi->skb = skb;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- netdev_tx_sent_queue(sq->txq, wi->num_bytes);
+ netdev_tx_sent_queue(sq->txq, num_bytes);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+ sq->pc += wi->num_wqebbs;
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
netif_tx_stop_queue(sq->txq);
sq->stats.stopped++;
}
- sq->stats.xmit_more += skb->xmit_more;
- if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
- int bf_sz = 0;
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
- if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
- bf_sz = wi->num_wqebbs << 3;
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->db.wqe_info[pi].skb = NULL;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ sq->stats.nop++;
+ }
+}
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+
+ unsigned char *skb_data = skb->data;
+ unsigned int skb_len = skb->len;
+ u8 opcode = MLX5_OPCODE_SEND;
+ unsigned int num_bytes;
+ int num_dma;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ if (skb_is_gso(skb)) {
+ opcode = MLX5_OPCODE_LSO;
+ ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ sq->stats.packets += skb_shinfo(skb)->gso_segs;
+ } else {
+ ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ sq->stats.packets++;
}
+ sq->stats.bytes += num_bytes;
+ sq->stats.xmit_more += skb->xmit_more;
- /* fill sq edge with nops to avoid wqe wrap around */
- while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->db.txq.skb[pi] = NULL;
- mlx5e_send_nop(sq, false);
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ if (ihs) {
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
+ ihs += VLAN_HLEN;
+ } else {
+ memcpy(eseg->inline_hdr.start, skb_data, ihs);
+ mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ }
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+ } else if (skb_vlan_tag_present(skb)) {
+ eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+ eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
}
- if (bf)
- sq->bf_budget--;
+ headlen = skb_len - skb->data_len;
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ if (unlikely(num_dma < 0))
+ goto dma_unmap_wqe_err;
+
+ mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ num_bytes, num_dma, wi, cseg);
return NETDEV_TX_OK;
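/* [Editor's note] Worked example of the descriptor accounting in
 * mlx5e_sq_xmit() above, not part of the patch, assuming the usual
 * mlx5 sizes (MLX5_SEND_WQE_DS = 16 B, a 64 B WQEBB so
 * MLX5_SEND_WQEBB_NUM_DS = 4, and a 32 B ctrl+eth base WQE, i.e. an
 * initial ds_cnt of 2). For a non-GSO skb with ihs = 54 and
 * sizeof(eseg->inline_hdr.start) = 2:
 *
 *	ds_cnt  = 2 + DIV_ROUND_UP(54 - 2, 16) = 2 + 4 = 6
 *	ds_cnt += num_dma (say one linear segment)   = 7
 *	wi->num_wqebbs = DIV_ROUND_UP(7, 4)          = 2
 *
 * so this packet consumes two 64 B basic blocks of the send queue.
 */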
@@ -398,21 +374,21 @@ dma_unmap_wqe_err:
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+ struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
return mlx5e_sq_xmit(sq, skb);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
- struct mlx5e_sq *sq;
+ struct mlx5e_txqsq *sq;
u32 dma_fifo_cc;
u32 nbytes;
u16 npkts;
u16 sqcc;
int i;
- sq = container_of(cq, struct mlx5e_sq, cq);
+ sq = container_of(cq, struct mlx5e_txqsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
@@ -450,8 +426,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
- skb = sq->db.txq.skb[ci];
- wi = &sq->db.txq.wqe_info[ci];
+ wi = &sq->db.wqe_info[ci];
+ skb = wi->skb;
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -492,7 +468,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
@@ -500,7 +476,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -509,8 +485,8 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
- skb = sq->db.txq.skb[ci];
- wi = &sq->db.txq.wqe_info[ci];
+ wi = &sq->db.wqe_info[ci];
+ skb = wi->skb;
if (!skb) { /* nop */
sq->cc++;
@@ -529,36 +505,89 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
}
}
-static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+#ifdef CONFIG_MLX5_CORE_IPOIB
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+struct mlx5i_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_datagram_seg datagram;
+ struct mlx5_wqe_eth_pad pad;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+static inline void
+mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
+ struct mlx5_wqe_datagram_seg *dseg)
{
- struct mlx5e_sq_wqe_info *wi;
- struct mlx5e_dma_info *di;
- u16 ci;
+ memcpy(&dseg->av, av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
+}
- while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
- di = &sq->db.xdp.di[ci];
- wi = &sq->db.xdp.wqe_info[ci];
+netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- if (wi->opcode == MLX5_OPCODE_NOP) {
- sq->cc++;
- continue;
- }
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- sq->cc += wi->num_wqebbs;
+ unsigned char *skb_data = skb->data;
+ unsigned int skb_len = skb->len;
+ u8 opcode = MLX5_OPCODE_SEND;
+ unsigned int num_bytes;
+ int num_dma;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
- mlx5e_page_release(&sq->channel->rq, di, false);
+ memset(wqe, 0, sizeof(*wqe));
+
+ mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ if (skb_is_gso(skb)) {
+ opcode = MLX5_OPCODE_LSO;
+ ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ } else {
+ ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
-}
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
-{
- switch (sq->type) {
- case MLX5E_SQ_TXQ:
- mlx5e_free_txq_sq_descs(sq);
- break;
- case MLX5E_SQ_XDP:
- mlx5e_free_xdp_sq_descs(sq);
- break;
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ if (ihs) {
+ memcpy(eseg->inline_hdr.start, skb_data, ihs);
+ mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
}
+
+ headlen = skb_len - skb->data_len;
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ if (unlikely(num_dma < 0))
+ goto dma_unmap_wqe_err;
+
+ mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ num_bytes, num_dma, wi, cseg);
+
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e5c12a732aa1..5ca6714e3e02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -37,124 +37,69 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
struct mlx5_cqwq *wq = &cq->wq;
u32 ci = mlx5_cqwq_get_ci(wq);
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
- int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
- int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+ u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
if (cqe_ownership_bit != sw_ownership_val)
return NULL;
/* ensure cqe content is read after cqe ownership bit */
- rmb();
+ dma_rmb();
return cqe;
}
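/* [Editor's note] Not part of the patch: the ownership handshake that
 * mlx5e_get_cqe() implements, sketched under the usual mlx5 convention.
 * Hardware flips the CQE owner bit on every pass over the ring, so a
 * CQE belongs to software only while its owner bit matches the parity
 * of the consumer's wrap count:
 *
 *	owner = cqe->op_own & MLX5_CQE_OWNER_MASK;   // written by HW
 *	mine  = mlx5_cqwq_get_wrap_cnt(wq) & 1;      // tracked by SW
 *	if (owner != mine)
 *		return NULL;                         // HW still owns it
 *	dma_rmb();  // order the check before reading the CQE payload
 *
 * dma_rmb() (rather than the stronger rmb()) suffices here because
 * both the owner bit and the payload are written by the device into
 * coherent DMA memory.
 */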
-static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
+ struct mlx5e_icosq *sq,
+ struct mlx5_cqe64 *cqe,
+ u16 *sqcc)
{
- struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
- struct mlx5_wq_cyc *wq;
- struct mlx5_cqe64 *cqe;
- u16 sqcc;
-
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+ struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
+ struct mlx5e_rq *rq = &sq->channel->rq;
+
+ prefetch(rq);
+ mlx5_cqwq_pop(&cq->wq);
+ *sqcc += icowi->num_wqebbs;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+ cqe->op_own);
return;
+ }
- cqe = mlx5e_get_cqe(cq);
- if (likely(!cqe))
+ if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
+ mlx5e_post_rx_mpwqe(rq);
return;
+ }
- wq = &sq->wq;
-
- /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
- * otherwise a cq overrun may occur
- */
- sqcc = sq->cc;
-
- do {
- u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
- struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
-
- mlx5_cqwq_pop(&cq->wq);
- sqcc += icowi->num_wqebbs;
-
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
- break;
- }
-
- switch (icowi->opcode) {
- case MLX5_OPCODE_NOP:
- break;
- case MLX5_OPCODE_UMR:
- mlx5e_post_rx_mpwqe(&sq->channel->rq);
- break;
- default:
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
- }
-
- } while ((cqe = mlx5e_get_cqe(cq)));
-
- mlx5_cqwq_update_db_record(&cq->wq);
-
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
- sq->cc = sqcc;
+ if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
+ WARN_ONCE(true,
+ "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ icowi->opcode);
}
-static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_sq *sq;
+ struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
+ struct mlx5_cqe64 *cqe;
u16 sqcc;
- int i;
-
- sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return false;
+ return;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (likely(!cqe))
+ return;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
sqcc = sq->cc;
- for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
- struct mlx5_cqe64 *cqe;
- u16 wqe_counter;
- bool last_wqe;
-
- cqe = mlx5e_get_cqe(cq);
- if (!cqe)
- break;
-
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
- do {
- struct mlx5e_sq_wqe_info *wi;
- struct mlx5e_dma_info *di;
- u16 ci;
-
- last_wqe = (sqcc == wqe_counter);
-
- ci = sqcc & sq->wq.sz_m1;
- di = &sq->db.xdp.di[ci];
- wi = &sq->db.xdp.wqe_info[ci];
-
- if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
- sqcc++;
- continue;
- }
-
- sqcc += wi->num_wqebbs;
- /* Recycle RX page */
- mlx5e_page_release(&sq->channel->rq, di, true);
- } while (!last_wqe);
- }
+ /* by design, there's only a single cqe */
+ mlx5e_poll_ico_single_cqe(cq, sq, cqe, &sqcc);
mlx5_cqwq_update_db_record(&cq->wq);
@@ -162,7 +107,6 @@ static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
wmb();
sq->cc = sqcc;
- return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
@@ -178,12 +122,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ if (c->xdp)
+ busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
- if (c->xdp)
- busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
-
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= mlx5e_post_rx_wqes(&c->rq);
@@ -224,8 +168,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct net_device *netdev = priv->netdev;
+ struct net_device *netdev = c->netdev;
netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
__func__, mcq->cqn, event);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index fcd5bc7e31db..2e34d95ea776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -53,13 +53,6 @@ struct esw_uc_addr {
u32 vport;
};
-/* E-Switch MC FDB table hash node */
-struct esw_mc_addr { /* SRIOV only */
- struct l2addr_node node;
- struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
- u32 refcnt;
-};
-
/* Vport UC/MC hash node */
struct vport_addr {
struct l2addr_node node;
@@ -337,6 +330,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -362,7 +356,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
+
+ ft_attr.max_fte = table_size;
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@@ -814,7 +810,7 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
bool promisc, bool mc_promisc)
{
- struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+ struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
struct mlx5_vport *vport = &esw->vports[vport_num];
if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
@@ -1685,7 +1681,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
esw->enabled_vports, esw->mode);
- mc_promisc = esw->mc_promisc;
+ mc_promisc = &esw->mc_promisc;
nvports = esw->enabled_vports;
for (i = 0; i < esw->total_vports; i++)
@@ -1729,7 +1725,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
int total_vports = MLX5_TOTAL_VPORTS(dev);
- struct esw_mc_addr *mc_promisc;
struct mlx5_eswitch *esw;
int vport_num;
int err;
@@ -1758,13 +1753,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->l2_table.size = l2_table_size;
- mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
- if (!mc_promisc) {
- err = -ENOMEM;
- goto abort;
- }
- esw->mc_promisc = mc_promisc;
-
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -1803,6 +1791,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+ else
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
dev->priv.eswitch = esw;
return 0;
@@ -1827,7 +1820,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
- kfree(esw->mc_promisc);
kfree(esw->offloads.vport_reps);
kfree(esw->vports);
kfree(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ad329b1680b4..b746f62c8c79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -36,7 +36,6 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <net/devlink.h>
-#include <net/ip_tunnels.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -210,6 +209,14 @@ struct mlx5_esw_offload {
DECLARE_HASHTABLE(encap_tbl, 8);
u8 inline_mode;
u64 num_flows;
+ u8 encap;
+};
+
+/* E-Switch MC FDB table hash node */
+struct esw_mc_addr { /* SRIOV only */
+ struct l2addr_node node;
+ struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
+ u32 refcnt;
};
struct mlx5_eswitch {
@@ -225,7 +232,7 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
- struct esw_mc_addr *mc_promisc;
+ struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@@ -285,20 +292,8 @@ enum {
SET_VLAN_INSERT = BIT(1)
};
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
-
-struct mlx5_encap_entry {
- struct hlist_node encap_hlist;
- struct list_head flows;
- u32 encap_id;
- struct neighbour *n;
- struct ip_tunnel_info tun_info;
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
-
- struct net_device *out_dev;
- int tunnel_type;
-};
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
@@ -307,7 +302,9 @@ struct mlx5_esw_flow_attr {
int action;
u16 vlan;
bool vlan_handled;
- struct mlx5_encap_entry *encap;
+ u32 encap_id;
+ u32 mod_hdr_id;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
};
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
@@ -321,6 +318,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
struct mlx5_eswitch_rep *rep);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d111cebca9f1..f991f669047e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -68,8 +68,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_counter_alloc;
+ }
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[i].counter = counter;
i++;
@@ -86,17 +88,25 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- if (attr->encap)
- flow_act.encap_id = attr->encap->encap_id;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ flow_act.modify_id = attr->mod_hdr_id;
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ flow_act.encap_id = attr->encap_id;
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
spec, &flow_act, dest, i);
if (IS_ERR(rule))
- mlx5_fc_destroy(esw->dev, counter);
+ goto err_add_rule;
else
esw->offloads.num_flows++;
return rule;
+
+err_add_rule:
+ mlx5_fc_destroy(esw->dev, counter);
+err_counter_alloc:
+ return rule;
}
void
@@ -106,12 +116,10 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
{
struct mlx5_fc *counter = NULL;
- if (!IS_ERR(rule)) {
- counter = mlx5_flow_rule_counter(rule);
- mlx5_del_flow_rules(rule);
- mlx5_fc_destroy(esw->dev, counter);
- esw->offloads.num_flows--;
- }
+ counter = mlx5_flow_rule_counter(rule);
+ mlx5_del_flow_rules(rule);
+ mlx5_fc_destroy(esw->dev, counter);
+ esw->offloads.num_flows--;
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
@@ -418,30 +426,21 @@ out:
return err;
}
-#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS 4
-static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int table_size, ix, esw_size, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- struct mlx5_flow_group *g;
- u32 *flow_group_in;
- void *match_criteria;
+ int esw_size, err = 0;
u32 flags = 0;
- flow_group_in = mlx5_vzalloc(inlen);
- if (!flow_group_in)
- return -ENOMEM;
-
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
err = -EOPNOTSUPP;
- goto ns_err;
+ goto out;
}
esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
@@ -451,8 +450,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
@@ -462,12 +460,55 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
- goto fast_fdb_err;
+ goto out;
}
esw->fdb_table.fdb = fdb;
+out:
+ return err;
+}
+
+static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+{
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
+#define MAX_PF_SQ 256
+
+static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb = NULL;
+ int table_size, ix, err = 0;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+
+ esw_debug(esw->dev, "Create offloads FDB Tables\n");
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
+ goto ns_err;
+ }
+
+ err = esw_create_offloads_fast_fdb_table(esw);
+ if (err)
+ goto fast_fdb_err;
+
table_size = nvports + MAX_PF_SQ + 1;
- fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
+
+ ft_attr.max_fte = table_size;
+ ft_attr.prio = FDB_SLOW_PATH;
+
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
@@ -532,25 +573,26 @@ ns_err:
return err;
}
-static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
if (!esw->fdb_table.fdb)
return;
- esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+ esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
- mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw_destroy_offloads_fast_fdb_table(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_namespace *ns;
int err = 0;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
@@ -559,7 +601,9 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
+ ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
+
+ ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
err = PTR_ERR(ft_offloads);
esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
@@ -700,7 +744,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_dev_list_unlock();
- err = esw_create_offloads_fdb_table(esw, nvports);
+ err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
goto create_fdb_err;
@@ -737,7 +781,7 @@ create_fg_err:
esw_destroy_offloads_table(esw);
create_ft_err:
- esw_destroy_offloads_fdb_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
/* enable back PF RoCE */
@@ -783,7 +827,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
- esw_destroy_offloads_fdb_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -1012,6 +1056,66 @@ out:
return 0;
}
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
+ (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
+ return -EOPNOTSUPP;
+
+ if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_LEGACY) {
+ esw->offloads.encap = encap;
+ return 0;
+ }
+
+ if (esw->offloads.encap == encap)
+ return 0;
+
+ if (esw->offloads.num_flows > 0) {
+ esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+ return -EOPNOTSUPP;
+ }
+
+ esw_destroy_offloads_fast_fdb_table(esw);
+
+ esw->offloads.encap = encap;
+ err = esw_create_offloads_fast_fdb_table(esw);
+ if (err) {
+ esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+ esw->offloads.encap = !encap;
+ (void) esw_create_offloads_fast_fdb_table(esw);
+ }
+ return err;
+}
+
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ *encap = esw->offloads.encap;
+ return 0;
+}
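/* [Editor's note] Not part of the patch: how this pair of callbacks is
 * expected to surface to userspace, assuming the iproute2 devlink
 * syntax introduced alongside this API (illustrative output only):
 *
 *	# devlink dev eswitch set pci/0000:03:00.0 encap enable
 *	# devlink dev eswitch show pci/0000:03:00.0
 *	pci/0000:03:00.0: mode switchdev inline-mode none encap enable
 *
 * Note the set path refuses with -EOPNOTSUPP once offloaded flows
 * exist (offloads.num_flows > 0), since switching the mode requires
 * re-creating the fast-path FDB table.
 */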
+
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
struct mlx5_eswitch_rep *__rep)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index b64a781c7e85..19e3d2fc2099 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -45,6 +45,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ ft->underlay_qpn == 0)
+ return 0;
+
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
@@ -54,6 +58,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
+ if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ ft->underlay_qpn != 0)
+ MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn);
+
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -249,6 +257,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
+ MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
@@ -515,3 +524,69 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+ int max_actions, actions_size, inlen, err;
+ void *actions_in;
+ u8 table_type;
+ u32 *in;
+
+ switch (namespace) {
+ case MLX5_FLOW_NAMESPACE_FDB:
+ max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
+ table_type = FS_FT_FDB;
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
+ table_type = FS_FT_NIC_RX;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (num_actions > max_actions) {
+ mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
+ num_actions, max_actions);
+ return -EOPNOTSUPP;
+ }
+
+ actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+ inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+ MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
+
+ actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+ memcpy(actions_in, modify_actions, actions_size);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+ *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+ kfree(in);
+ return err;
+}
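/* [Editor's note] Worked example of the inlen computation above, not
 * part of the patch, assuming the 8 B (64-bit) set_action_in layout
 * from mlx5_ifc.h. For num_actions = 3:
 *
 *	actions_size = 8 * 3 = 24
 *	inlen = sizeof(alloc_modify_header_context_in) + 24
 *
 * i.e. the variable-length action array simply trails the fixed
 * command header, which is why the buffer is kzalloc'ed per call.
 */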
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+ modify_header_id);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index ded27bb9a3b6..b8a176503d38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -476,6 +476,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
fte->index = index;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
+ fte->modify_id = flow_act->modify_id;
return fte;
}
@@ -777,18 +778,16 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
}
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr,
enum fs_flow_table_op_mod op_mod,
- u16 vport, int prio,
- int max_fte, u32 level,
- u32 flags)
+ u16 vport)
{
+ struct mlx5_flow_root_namespace *root = find_root(&ns->node);
struct mlx5_flow_table *next_ft = NULL;
+ struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
- int err;
int log_table_sz;
- struct mlx5_flow_root_namespace *root =
- find_root(&ns->node);
- struct fs_prio *fs_prio = NULL;
+ int err;
if (!root) {
pr_err("mlx5: flow steering failed to find root of namespace\n");
@@ -796,29 +795,31 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
}
mutex_lock(&root->chain_lock);
- fs_prio = find_prio(ns, prio);
+ fs_prio = find_prio(ns, ft_attr->prio);
if (!fs_prio) {
err = -EINVAL;
goto unlock_root;
}
- if (level >= fs_prio->num_levels) {
+ if (ft_attr->level >= fs_prio->num_levels) {
err = -ENOSPC;
goto unlock_root;
}
/* The level is related to the
* priority level range.
*/
- level += fs_prio->start_level;
- ft = alloc_flow_table(level,
+ ft_attr->level += fs_prio->start_level;
+ ft = alloc_flow_table(ft_attr->level,
vport,
- max_fte ? roundup_pow_of_two(max_fte) : 0,
+ ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
root->table_type,
- op_mod, flags);
+ op_mod, ft_attr->flags);
if (!ft) {
err = -ENOMEM;
goto unlock_root;
}
+ ft->underlay_qpn = ft_attr->underlay_qpn;
+
tree_init_node(&ft->node, 1, del_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
@@ -848,44 +849,56 @@ unlock_root:
}
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio, int max_fte,
- u32 level,
- u32 flags)
+ struct mlx5_flow_table_attr *ft_attr)
{
- return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
- max_fte, level, flags);
+ return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level, u16 vport)
{
- return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
- max_fte, level, 0);
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.max_fte = max_fte;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+
+ return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
-struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
- struct mlx5_flow_namespace *ns,
- int prio, u32 level)
+struct mlx5_flow_table*
+mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, u32 level)
{
- return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
- level, 0);
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+ return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
-struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags)
+struct mlx5_flow_table*
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries,
+ int max_num_groups,
+ u32 level,
+ u32 flags)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
- ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
+ ft_attr.max_fte = num_flow_table_entries;
+ ft_attr.prio = prio;
+ ft_attr.level = level;
+ ft_attr.flags = flags;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return ft;
@@ -1827,12 +1840,18 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (WARN_ON(!ns))
return -EINVAL;
- ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
+
+ ft_attr.max_fte = ANCHOR_SIZE;
+ ft_attr.level = ANCHOR_LEVEL;
+ ft_attr.prio = ANCHOR_PRIO;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
@@ -1886,9 +1905,6 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return;
-
cleanup_root_ns(steering->root_ns);
cleanup_root_ns(steering->esw_egress_root_ns);
cleanup_root_ns(steering->esw_ingress_root_ns);
@@ -1991,9 +2007,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
struct mlx5_flow_steering *steering;
int err = 0;
- if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return 0;
-
err = mlx5_init_fc_stats(dev);
if (err)
return err;
@@ -2004,7 +2017,10 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
- if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev, nic_flow_table))) ||
+ ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(steering);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 8e668c63f69e..81eafc7b9dd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -118,6 +118,7 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
+ u32 underlay_qpn;
};
struct mlx5_fc_cache {
@@ -152,6 +153,7 @@ struct fs_fte {
u32 index;
u32 action;
u32 encap_id;
+ u32 modify_id;
enum fs_fte_status status;
struct mlx5_fc *counter;
};
@@ -197,6 +199,11 @@ struct mlx5_flow_root_namespace {
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
+ struct delayed_work *dwork,
+ unsigned long delay);
+void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
+ unsigned long interval);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 7431f633de31..6507d8acc54d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -165,7 +165,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)
list_splice_tail_init(&fc_stats->addlist, &tmplist);
if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
- queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+ queue_delayed_work(fc_stats->wq, &fc_stats->work,
+ fc_stats->sampling_interval);
spin_unlock(&fc_stats->addlist_lock);
@@ -200,7 +201,7 @@ static void mlx5_fc_stats_work(struct work_struct *work)
node = mlx5_fc_stats_query(dev, counter, last->id);
}
- fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+ fc_stats->next_query = now + fc_stats->sampling_interval;
}
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
@@ -265,6 +266,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
if (!fc_stats->wq)
return -ENOMEM;
+ fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
return 0;
@@ -317,3 +319,21 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter,
counter->lastbytes = c.bytes;
counter->lastpackets = c.packets;
}
+
+void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ queue_delayed_work(fc_stats->wq, dwork, delay);
+}
+
+void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
+ unsigned long interval)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ fc_stats->sampling_interval = min_t(unsigned long, interval,
+ fc_stats->sampling_interval);
+}
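/* [Editor's note] Not part of the patch: mlx5_fc_update_sampling_interval()
 * uses min_t(), so callers can only make counter sampling more frequent,
 * never less. E.g. starting from MLX5_FC_STATS_PERIOD (1000 ms upstream,
 * an assumption here), a request for 200 ms takes effect, while a later
 * request for 2000 ms leaves the interval at 200 ms; only the
 * monotonicity itself follows from the code.
 */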
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index d0bbefa08af7..1bc14d0fded8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -137,7 +137,8 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, nic_flow_table) ||
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
new file mode 100644
index 000000000000..3c84e36af018
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/fs.h>
+#include "en.h"
+#include "ipoib.h"
+
+#define IB_DEFAULT_Q_KEY 0xb1b
+
+static int mlx5i_open(struct net_device *netdev);
+static int mlx5i_close(struct net_device *netdev);
+static int mlx5i_dev_init(struct net_device *dev);
+static void mlx5i_dev_cleanup(struct net_device *dev);
+
+static const struct net_device_ops mlx5i_netdev_ops = {
+ .ndo_open = mlx5i_open,
+ .ndo_stop = mlx5i_close,
+ .ndo_init = mlx5i_dev_init,
+ .ndo_uninit = mlx5i_dev_cleanup,
+};
+
+/* IPoIB mlx5 netdev profile */
+
+/* Called directly after IPoIB netdevice was created to initialize SW structs */
+static void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+
+ mutex_init(&priv->state_lock);
+
+ netdev->hw_features |= NETIF_F_SG;
+ netdev->hw_features |= NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ netdev->hw_features |= NETIF_F_GRO;
+ netdev->hw_features |= NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->netdev_ops = &mlx5i_netdev_ops;
+}
+
+/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
+static void mlx5i_cleanup(struct mlx5e_priv *priv)
+{
+	/* Do nothing */
+}
+
+#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
+
+static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+{
+ struct mlx5_qp_context *context = NULL;
+ u32 *in = NULL;
+ void *addr_path;
+ int ret = 0;
+ int inlen;
+ void *qpc;
+
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
+ MLX5_QP_ENHANCED_ULP_STATELESS_MODE);
+
+ addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, addr_path, port, 1);
+ MLX5_SET(ads, addr_path, grh, 1);
+
+ ret = mlx5_core_create_qp(mdev, qp, in, inlen);
+ if (ret) {
+		mlx5_core_err(mdev, "Failed creating IPoIB QP, err: %d\n", ret);
+ goto out;
+ }
+
+ /* QP states */
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ context->pri_path.port = 1;
+ context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
+ goto out;
+ }
+ memset(context, 0, sizeof(*context));
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
+ goto out;
+ }
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
+ goto out;
+ }
+
+out:
+ kfree(context);
+ kvfree(in);
+ return ret;
+}
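/* [Editor's note] Not part of the patch: mlx5i_create_underlay_qp()
 * above walks the standard InfiniBand QP state machine for the UD
 * underlay QP, reusing a single context that is zeroed after the
 * first transition:
 *
 *	RESET --RST2INIT--> INIT --INIT2RTR--> RTR --RTR2RTS--> RTS
 *
 * Only RST2INIT carries real attributes (migration state, port,
 * Q_Key); once in RTS the QP can send and receive, and it is torn
 * back down with a single 2RST modify in mlx5i_dev_cleanup().
 */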
+
+static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+{
+ mlx5_core_destroy_qp(mdev, qp);
+}
+
+static int mlx5i_init_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+}
+
+static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+
+ if (!priv->fs.ns)
+ return -EINVAL;
+
+ err = mlx5e_arfs_create_tables(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+ err);
+ priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ }
+
+ err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
+
+ return 0;
+
+err_destroy_arfs_tables:
+ mlx5e_arfs_destroy_tables(priv);
+
+ return err;
+}
+
+static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_ttc_table(priv);
+ mlx5e_arfs_destroy_tables(priv);
+}
+
+static int mlx5i_init_rx(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_indirect_rqt(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_direct_rqts(priv);
+ if (err)
+ goto err_destroy_indirect_rqts;
+
+ err = mlx5e_create_indirect_tirs(priv);
+ if (err)
+ goto err_destroy_direct_rqts;
+
+ err = mlx5e_create_direct_tirs(priv);
+ if (err)
+ goto err_destroy_indirect_tirs;
+
+ err = mlx5i_create_flow_steering(priv);
+ if (err)
+ goto err_destroy_direct_tirs;
+
+ return 0;
+
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+ mlx5e_destroy_indirect_tirs(priv);
+err_destroy_direct_rqts:
+ mlx5e_destroy_direct_rqts(priv);
+err_destroy_indirect_rqts:
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ return err;
+}
+
+static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
+{
+ mlx5i_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+ mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+}
+
+static const struct mlx5e_profile mlx5i_nic_profile = {
+ .init = mlx5i_init,
+ .cleanup = mlx5i_cleanup,
+ .init_tx = mlx5i_init_tx,
+ .cleanup_tx = mlx5i_cleanup_tx,
+ .init_rx = mlx5i_init_rx,
+ .cleanup_rx = mlx5i_cleanup_rx,
+ .enable = NULL, /* mlx5i_enable */
+ .disable = NULL, /* mlx5i_disable */
+ .update_stats = NULL, /* mlx5i_update_stats */
+ .max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .max_tc = MLX5I_MAX_NUM_TC,
+};
+
+/* mlx5i netdev NDOs */
+
+static int mlx5i_dev_init(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ /* Set dev address using underlay QP */
+ dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
+ dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
+ dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+
+ return 0;
+}
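/* [Editor's note] Not part of the patch: an IPoIB hardware address is
 * 20 bytes (INFINIBAND_ALEN): a leading flags byte, the 24-bit queue
 * pair number, then the 16-byte port GID. mlx5i_dev_init() above fills
 * the QPN portion, bytes 1..3. E.g. qpn = 0x01f2a3 gives
 * dev_addr[1] = 0x01, dev_addr[2] = 0xf2 and dev_addr[3] = 0xa3.
 */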
+
+static void mlx5i_dev_cleanup(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_qp_context context;
+
+	/* detach the QP from flow steering by resetting it */
+ mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp);
+}
+
+static int mlx5i_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ err = mlx5e_open_channels(priv, &priv->channels);
+ if (err)
+ goto err_clear_state_opened_flag;
+
+ mlx5e_refresh_tirs(priv, false);
+ mlx5e_activate_priv_channels(priv);
+ mutex_unlock(&priv->state_lock);
+ return 0;
+
+err_clear_state_opened_flag:
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5i_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ /* May already be CLOSED in case a previous configuration operation
+	 * (e.g. an RX/TX queue size change) that involved a close/open sequence failed.
+ */
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ netif_carrier_off(priv->netdev);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+#ifdef notusedyet
+/* IPoIB RDMA netdev callbacks */
+static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
+ union ib_gid *gid, u16 lid, int set_qkey)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ int err;
+
+ mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+ err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+ if (err)
+ mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
+ ipriv->qp.qpn, gid->raw);
+
+ return err;
+}
+
+static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
+ union ib_gid *gid, u16 lid)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ int err;
+
+ mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+
+ err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+ if (err)
+		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
+ ipriv->qp.qpn, gid->raw);
+
+ return err;
+}
+
+static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn, u32 dqkey)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+ struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
+ struct mlx5_ib_ah *mah = to_mah(address);
+
+ return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, dqkey);
+}
+#endif
+
+static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+ mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
+		return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *))
+{
+ const struct mlx5e_profile *profile = &mlx5i_nic_profile;
+ int nch = profile->max_nch(mdev);
+ struct net_device *netdev;
+ struct mlx5i_priv *ipriv;
+ struct mlx5e_priv *epriv;
+ int err;
+
+ if (mlx5i_check_required_hca_cap(mdev)) {
+ mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* This function should only be called once per mdev */
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ return NULL;
+
+ netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
+ name, NET_NAME_UNKNOWN,
+ setup,
+ nch * MLX5E_MAX_NUM_TC,
+ nch);
+ if (!netdev) {
+ mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
+ goto free_mdev_resources;
+ }
+
+ ipriv = netdev_priv(netdev);
+ epriv = mlx5i_epriv(netdev);
+
+ epriv->wq = create_singlethread_workqueue("mlx5i");
+ if (!epriv->wq)
+ goto err_free_netdev;
+
+ profile->init(mdev, netdev, profile, ipriv);
+
+ mlx5e_attach_netdev(epriv);
+ netif_carrier_off(netdev);
+
+ /* TODO: set rdma_netdev func pointers
+ * rn = &ipriv->rn;
+ * rn->hca = ibdev;
+ * rn->send = mlx5i_xmit;
+ * rn->attach_mcast = mlx5i_attach_mcast;
+ * rn->detach_mcast = mlx5i_detach_mcast;
+ */
+ return netdev;
+
+err_free_netdev:
+ free_netdev(netdev);
+free_mdev_resources:
+ mlx5e_destroy_mdev_resources(mdev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+
+static void mlx5_rdma_netdev_free(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ const struct mlx5e_profile *profile = priv->profile;
+
+ mlx5e_detach_netdev(priv);
+ profile->cleanup(priv);
+ destroy_workqueue(priv->wq);
+ free_netdev(netdev);
+
+ mlx5e_destroy_mdev_resources(priv->mdev);
+}
+EXPORT_SYMBOL(mlx5_rdma_netdev_free);
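A hedged sketch of how an RDMA ULP might pair these two exported entry points; the setup callback name and the error handling are assumptions, not taken from this patch:

/* Sketch: bring up an enhanced-mode IPoIB netdev on top of an mlx5
 * core device, then tear it down. mlx5_rdma_netdev_alloc() returns
 * either an ERR_PTR() or NULL on failure, so both must be checked.
 * "my_ipoib_setup" is a hypothetical ULP-provided setup callback.
 */
struct net_device *netdev;

netdev = mlx5_rdma_netdev_alloc(mdev, ibdev, "ib%d", my_ipoib_setup);
if (IS_ERR_OR_NULL(netdev))
	return netdev ? PTR_ERR(netdev) : -ENOMEM;

/* ... register and use the netdev ... */

mlx5_rdma_netdev_free(netdev);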
+
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
index 63c666d0b739..bae0a5cbc8ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
@@ -1,5 +1,5 @@
-/* QLogic qed NIC Driver
- * Copyright (c) 2015-2017 QLogic Corporation
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -17,7 +17,7 @@
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
+ * disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
@@ -29,19 +29,26 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef _QED_PTP_H
-#define _QED_PTP_H
-#include <linux/types.h>
-int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_ptp_filter_type type);
-int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
-int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
-int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u64 *cycles);
-int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
-int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-#endif
+#ifndef __MLX5E_IPOB_H__
+#define __MLX5E_IPOB_H__
+#include <linux/mlx5/fs.h>
+#include "en.h"
+
+#define MLX5I_MAX_NUM_TC 1
+
+/* ipoib rdma netdev's private data structure */
+struct mlx5i_priv {
+ struct mlx5_core_qp qp;
+ char *mlx5e_priv[0];
+};
+
+/* Extract mlx5e_priv from IPoIB netdev */
+#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
+
+netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey);
+void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+#endif /* __MLX5E_IPOB_H__ */
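The zero-length mlx5e_priv[] member and the mlx5i_epriv() macro rely on the single allocation made by alloc_netdev_mqs() in ipoib.c, which requests sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv). A sketch of the resulting layout, derived from those sizes rather than stated anywhere in the patch:

/*
 * netdev_priv(netdev)                          -> struct mlx5i_priv
 * netdev_priv(netdev) + sizeof(mlx5i_priv)     -> struct mlx5e_priv
 *                                                 (== mlx5i_epriv(netdev))
 */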
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0ad66324247f..0c123d571b4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1280,6 +1280,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
+ .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
+ .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
};
@@ -1514,8 +1516,10 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
- { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
- { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5, PCIe 4.0 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5 Ex */
+ { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */
+ { PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */
+ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b3dabe6e8836..fbc6e9e9e305 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -141,6 +141,11 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
+
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 6b6c30deee83..2fb8c6585ac7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o
+ spectrum_acl.o spectrum_flower.o \
+ spectrum_cnt.o spectrum_dpipe.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index a1b48421648a..479511cf79bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_cq_oi
- * When set, overrun ignore is enabled. When set, updates of
- * CQ consumer counter (poll for completion) or Request completion
- * notifications (Arm CQ) DoorBells should not be rung on that CQ.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
-
/* cmd_mbox_sw2hw_cq_st
* Event delivery state machine
* 0x0 - FIRED
@@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_eq_oi
- * When set, overrun ignore is enabled.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
-
/* cmd_mbox_sw2hw_eq_st
* Event delivery state machine
* 0x0 - FIRED
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index a4c07841aaf6..affe84eb4bff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -40,9 +40,6 @@
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
@@ -74,23 +71,9 @@ static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
static const char mlxsw_core_driver_name[] = "mlxsw_core";
-static struct dentry *mlxsw_core_dbg_root;
-
static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
-struct mlxsw_core_pcpu_stats {
- u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
- u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
- u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
- u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
- struct u64_stats_sync syncp;
- u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
- u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
- u32 trap_rx_invalid;
- u32 port_rx_invalid;
-};
-
struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
@@ -121,23 +104,48 @@ struct mlxsw_core {
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
} emad;
- struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
- struct dentry *dbg_dir;
- struct {
- struct debugfs_blob_wrapper vsd_blob;
- struct debugfs_blob_wrapper psid_blob;
- } dbg;
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
struct mlxsw_thermal *thermal;
- struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_core_port *ports;
+ unsigned int max_ports;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
+#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
+{
+	/* Switch ports are numbered from 1 to the queried value */
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
+ mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_SYSTEM_PORT) + 1;
+ else
+ mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
+
+ mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
+ sizeof(struct mlxsw_core_port), GFP_KERNEL);
+ if (!mlxsw_core->ports)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
+{
+ kfree(mlxsw_core->ports);
+}
+
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->max_ports;
+}
+EXPORT_SYMBOL(mlxsw_core_max_ports);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
@@ -703,91 +711,6 @@ err_out:
* Core functions
*****************/
-static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_core *mlxsw_core = file->private;
- struct mlxsw_core_pcpu_stats *p;
- u64 rx_packets, rx_bytes;
- u64 tmp_rx_packets, tmp_rx_bytes;
- u32 rx_dropped, rx_invalid;
- unsigned int start;
- int i;
- int j;
- static const char hdr[] =
- " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
- rx_packets = 0;
- rx_bytes = 0;
- rx_dropped = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- do {
- start = u64_stats_fetch_begin(&p->syncp);
- tmp_rx_packets = p->trap_rx_packets[i];
- tmp_rx_bytes = p->trap_rx_bytes[i];
- } while (u64_stats_fetch_retry(&p->syncp, start));
-
- rx_packets += tmp_rx_packets;
- rx_bytes += tmp_rx_bytes;
- rx_dropped += p->trap_rx_dropped[i];
- }
- seq_printf(file, "trap %3d %12llu %12llu %10u\n",
- i, rx_packets, rx_bytes, rx_dropped);
- }
- rx_invalid = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- rx_invalid += p->trap_rx_invalid;
- }
- seq_printf(file, "trap INV %10u\n",
- rx_invalid);
-
- for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
- rx_packets = 0;
- rx_bytes = 0;
- rx_dropped = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- do {
- start = u64_stats_fetch_begin(&p->syncp);
- tmp_rx_packets = p->port_rx_packets[i];
- tmp_rx_bytes = p->port_rx_bytes[i];
- } while (u64_stats_fetch_retry(&p->syncp, start));
-
- rx_packets += tmp_rx_packets;
- rx_bytes += tmp_rx_bytes;
- rx_dropped += p->port_rx_dropped[i];
- }
- seq_printf(file, "port %3d %12llu %12llu %10u\n",
- i, rx_packets, rx_bytes, rx_dropped);
- }
- rx_invalid = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- rx_invalid += p->port_rx_invalid;
- }
- seq_printf(file, "port INV %10u\n",
- rx_invalid);
- return 0;
-}
-
-static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
-{
- struct mlxsw_core *mlxsw_core = inode->i_private;
-
- return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
-}
-
-static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
- .owner = THIS_MODULE,
- .open = mlxsw_core_rx_stats_dbg_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
-
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
spin_lock(&mlxsw_core_driver_list_lock);
@@ -835,39 +758,13 @@ static void mlxsw_core_driver_put(const char *kind)
spin_unlock(&mlxsw_core_driver_list_lock);
}
-static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
-{
- const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
-
- mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
- mlxsw_core_dbg_root);
- if (!mlxsw_core->dbg_dir)
- return -ENOMEM;
- debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
- mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
- mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
- mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
- debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
- &mlxsw_core->dbg.vsd_blob);
- mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
- mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
- debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
- &mlxsw_core->dbg.psid_blob);
- return 0;
-}
-
-static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
-{
- debugfs_remove_recursive(mlxsw_core->dbg_dir);
-}
-
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= MLXSW_PORT_MAX_PORTS)
+ if (port_index >= mlxsw_core->max_ports)
return -EINVAL;
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
@@ -879,7 +776,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= MLXSW_PORT_MAX_PORTS)
+ if (port_index >= mlxsw_core->max_ports)
return -EINVAL;
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
@@ -1101,18 +998,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
- mlxsw_core->pcpu_stats =
- netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
- if (!mlxsw_core->pcpu_stats) {
- err = -ENOMEM;
- goto err_alloc_stats;
- }
-
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->res);
if (err)
goto err_bus_init;
+ err = mlxsw_ports_init(mlxsw_core);
+ if (err)
+ goto err_ports_init;
+
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
alloc_size = sizeof(u8) *
@@ -1148,15 +1042,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
- err = mlxsw_core_debugfs_init(mlxsw_core);
- if (err)
- goto err_debugfs_init;
-
return 0;
-err_debugfs_init:
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
@@ -1167,10 +1054,10 @@ err_devlink_register:
err_emad_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_ports_fini(mlxsw_core);
+err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- free_percpu(mlxsw_core->pcpu_stats);
-err_alloc_stats:
devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
@@ -1183,15 +1070,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- mlxsw_core_debugfs_fini(mlxsw_core);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
+ mlxsw_ports_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- free_percpu(mlxsw_core->pcpu_stats);
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1639,7 +1525,6 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
{
struct mlxsw_rx_listener_item *rxl_item;
const struct mlxsw_rx_listener *rxl;
- struct mlxsw_core_pcpu_stats *pcpu_stats;
u8 local_port;
bool found = false;
@@ -1661,7 +1546,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
__func__, local_port, rx_info->trap_id);
if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
- (local_port >= MLXSW_PORT_MAX_PORTS))
+ (local_port >= mlxsw_core->max_ports))
goto drop;
rcu_read_lock();
@@ -1678,26 +1563,10 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
if (!found)
goto drop;
- pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
- u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->port_rx_packets[local_port]++;
- pcpu_stats->port_rx_bytes[local_port] += skb->len;
- pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
- pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
- u64_stats_update_end(&pcpu_stats->syncp);
-
rxl->func(skb, local_port, rxl_item->priv);
return;
drop:
- if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
- this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
- else
- this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
- if (local_port >= MLXSW_PORT_MAX_PORTS)
- this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
- else
- this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
@@ -1926,15 +1795,8 @@ static int __init mlxsw_core_module_init(void)
err = -ENOMEM;
goto err_alloc_ordered_workqueue;
}
- mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
- if (!mlxsw_core_dbg_root) {
- err = -ENOMEM;
- goto err_debugfs_create_dir;
- }
return 0;
-err_debugfs_create_dir:
- destroy_workqueue(mlxsw_owq);
err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
return err;
@@ -1942,7 +1804,6 @@ err_alloc_ordered_workqueue:
static void __exit mlxsw_core_module_exit(void)
{
- debugfs_remove_recursive(mlxsw_core_dbg_root);
destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index cf38cf9027f8..7fb35395adf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -57,6 +57,8 @@ struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 5f337715a4da..46304ffb9449 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}
+/* VLAN Action
+ * -----------
+ * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
+ * VLAN translation, change of PCP bits of the VLAN tag, push, pop as swap VLANs
+ * and more.
+ */
+
+#define MLXSW_AFA_VLAN_CODE 0x02
+#define MLXSW_AFA_VLAN_SIZE 1
+
+enum mlxsw_afa_vlan_vlan_tag_cmd {
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
+};
+
+enum mlxsw_afa_vlan_cmd {
+ MLXSW_AFA_VLAN_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER,
+ MLXSW_AFA_VLAN_CMD_SET_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
+ MLXSW_AFA_VLAN_CMD_SWAP,
+};
+
+/* afa_vlan_vlan_tag_cmd
+ * Tag command: push, pop, nop VLAN header.
+ */
+MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);
+
+/* afa_vlan_vid_cmd */
+MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);
+
+/* afa_vlan_vid */
+MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);
+
+/* afa_vlan_ethertype_cmd */
+MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);
+
+/* afa_vlan_ethertype
+ * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
+ */
+MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);
+
+/* afa_vlan_pcp_cmd */
+MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);
+
+/* afa_vlan_pcp */
+MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
+
+static inline void
+mlxsw_afa_vlan_pack(char *payload,
+ enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
+ enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
+ enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
+ enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
+{
+ mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
+ mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
+ mlxsw_afa_vlan_vid_set(payload, vid);
+ mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
+ mlxsw_afa_vlan_pcp_set(payload, pcp);
+ mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
+ mlxsw_afa_vlan_ethertype_set(payload, ethertype);
+}
+
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VLAN_CODE,
+ MLXSW_AFA_VLAN_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
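A short usage sketch; "block" is assumed to be an already-allocated mlxsw_afa_block and the values are illustrative only:

int err;

/* Sketch: rewrite the outer tag of matching packets to VID 10, PCP 3,
 * SVER EtherType index 0, without pushing or popping a tag.
 */
err = mlxsw_afa_block_append_vlan_modify(block, 10, 3, 0);
if (err)
	return err;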
+
/* Trap / Discard Action
* ---------------------
* The Trap / Discard action enables trapping / mirroring packets to the CPU
@@ -677,3 +760,98 @@ err_append_action:
return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
+
+/* Policing and Counting Action
+ * ----------------------------
+ * The Policing and Counting action is used to bind a policer and a counter
+ * to ACL rules.
+ */
+
+#define MLXSW_AFA_POLCNT_CODE 0x08
+#define MLXSW_AFA_POLCNT_SIZE 1
+
+enum mlxsw_afa_polcnt_counter_set_type {
+ /* No count */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* afa_polcnt_counter_set_type
+ * Counter set type for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
+
+/* afa_polcnt_counter_index
+ * Counter index for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
+
+static inline void
+mlxsw_afa_polcnt_pack(char *payload,
+ enum mlxsw_afa_polcnt_counter_set_type set_type,
+ u32 counter_index)
+{
+ mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
+ mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
+}
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_POLCNT_CODE,
+ MLXSW_AFA_POLCNT_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
+ counter_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
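The counter index is expected to come from the new Spectrum counter pool; a sketch of the intended pairing, using the flow-counter helpers added to spectrum.c later in this diff ("mlxsw_sp" and "block" are assumed context):

unsigned int counter_index;
int err;

err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
if (err)
	return err;

err = mlxsw_afa_block_append_counter(block, counter_index);
if (err) {
	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
	return err;
}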
+
+/* Virtual Router and Forwarding Domain Action
+ * -------------------------------------------
+ * The Virtual Router and Forwarding Domain action is used to manipulate
+ * the Virtual Router (VR), the MPLS label space and the Forwarding
+ * Identifier (FID).
+
+#define MLXSW_AFA_VIRFWD_CODE 0x0E
+#define MLXSW_AFA_VIRFWD_SIZE 1
+
+enum mlxsw_afa_virfwd_fid_cmd {
+ /* Do nothing */
+ MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
+ /* Set the Forwarding Identifier (FID) to fid */
+ MLXSW_AFA_VIRFWD_FID_CMD_SET,
+};
+
+/* afa_virfwd_fid_cmd */
+MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);
+
+/* afa_virfwd_fid
+ * The FID value.
+ */
+MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);
+
+static inline void mlxsw_afa_virfwd_pack(char *payload,
+ enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
+ u16 fid)
+{
+ mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
+ mlxsw_afa_virfwd_fid_set(payload, fid);
+}
+
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VIRFWD_CODE,
+ MLXSW_AFA_VIRFWD_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43f78dcfe394..bd8b91d02880 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -62,5 +62,10 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index e4fcba7c2af2..c75e9141e3ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -54,6 +54,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_DST_IP6_LO,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -88,7 +90,7 @@ struct mlxsw_afk_element_info {
MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
_element, _offset, 0, _size)
-/* For the purpose of the driver, define a internal storage scratchpad
+/* For the purpose of the driver, define an internal storage scratchpad
* that will be used to store key/mask values. For each defined element type
* define an internal storage geometry.
*/
@@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+ MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a223c85dfde0..23f7d828cf67 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -44,8 +44,6 @@
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
#include <linux/string.h>
#include "pci_hw.h"
@@ -57,8 +55,6 @@
static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-static struct dentry *mlxsw_pci_dbg_root;
-
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -71,21 +67,6 @@ enum mlxsw_pci_queue_type {
MLXSW_PCI_QUEUE_TYPE_EQ,
};
-static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
-{
- switch (q_type) {
- case MLXSW_PCI_QUEUE_TYPE_SDQ:
- return "sdq";
- case MLXSW_PCI_QUEUE_TYPE_RDQ:
- return "rdq";
- case MLXSW_PCI_QUEUE_TYPE_CQ:
- return "cq";
- case MLXSW_PCI_QUEUE_TYPE_EQ:
- return "eq";
- }
- BUG();
-}
-
#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
static const u16 mlxsw_pci_doorbell_type_offset[] = {
@@ -155,7 +136,6 @@ struct mlxsw_pci {
u8 __iomem *hw_addr;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
- struct msix_entry msix_entry;
struct mlxsw_core *core;
struct {
struct mlxsw_pci_mem_item *items;
@@ -174,7 +154,6 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
- struct dentry *dbg_dir;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -261,21 +240,11 @@ static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}
-static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
-}
-
static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}
-static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
-}
-
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type, u8 q_num)
@@ -390,26 +359,6 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
-static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM PROD_COUNT CONS_COUNT COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_sdq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %5d\n",
- i, q->producer_counter, q->consumer_counter,
- q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
@@ -544,26 +493,6 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
}
}
-static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM PROD_COUNT CONS_COUNT COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_rdq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %5d\n",
- i, q->producer_counter, q->consumer_counter,
- q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
@@ -580,7 +509,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
- mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -602,27 +530,6 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
-static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
-
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_cq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %10d %5d\n",
- i, q->consumer_counter, q->u.cq.comp_sdq_count,
- q->u.cq.comp_rdq_count, q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
@@ -755,7 +662,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
- mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -777,27 +683,6 @@ static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
-static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_eq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
- i, q->consumer_counter, q->u.eq.ev_cmd_count,
- q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
- q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
@@ -868,7 +753,6 @@ struct mlxsw_pci_queue_ops {
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
void (*tasklet)(unsigned long data);
- int (*dbg_read)(struct seq_file *s, void *data);
u16 elem_count;
u8 elem_size;
};
@@ -877,7 +761,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_SDQ,
.init = mlxsw_pci_sdq_init,
.fini = mlxsw_pci_sdq_fini,
- .dbg_read = mlxsw_pci_sdq_dbg_read,
.elem_count = MLXSW_PCI_WQE_COUNT,
.elem_size = MLXSW_PCI_WQE_SIZE,
};
@@ -886,7 +769,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_RDQ,
.init = mlxsw_pci_rdq_init,
.fini = mlxsw_pci_rdq_fini,
- .dbg_read = mlxsw_pci_rdq_dbg_read,
.elem_count = MLXSW_PCI_WQE_COUNT,
.elem_size = MLXSW_PCI_WQE_SIZE
};
@@ -896,7 +778,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
.tasklet = mlxsw_pci_cq_tasklet,
- .dbg_read = mlxsw_pci_cq_dbg_read,
.elem_count = MLXSW_PCI_CQE_COUNT,
.elem_size = MLXSW_PCI_CQE_SIZE
};
@@ -906,7 +787,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
.init = mlxsw_pci_eq_init,
.fini = mlxsw_pci_eq_fini,
.tasklet = mlxsw_pci_eq_tasklet,
- .dbg_read = mlxsw_pci_eq_dbg_read,
.elem_count = MLXSW_PCI_EQE_COUNT,
.elem_size = MLXSW_PCI_EQE_SIZE
};
@@ -984,9 +864,7 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
const struct mlxsw_pci_queue_ops *q_ops,
u8 num_qs)
{
- struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_type_group *queue_group;
- char tmp[16];
int i;
int err;
@@ -1003,10 +881,6 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
queue_group->count = num_qs;
- sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
- debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
- q_ops->dbg_read);
-
return 0;
err_queue_init:
@@ -1534,7 +1408,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_aqs_init;
- err = request_irq(mlxsw_pci->msix_entry.vector,
+ err = request_irq(pci_irq_vector(pdev, 0),
mlxsw_pci_eq_irq_handler, 0,
mlxsw_pci->bus_info.device_kind, mlxsw_pci);
if (err) {
@@ -1567,7 +1441,7 @@ static void mlxsw_pci_fini(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+ free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
mlxsw_pci_aqs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
@@ -1842,8 +1716,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
- if (err) {
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
}
@@ -1852,14 +1726,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
- mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
- mlxsw_pci_dbg_root);
- if (!mlxsw_pci->dbg_dir) {
- dev_err(&pdev->dev, "Failed to create debugfs dir\n");
- err = -ENOMEM;
- goto err_dbg_create_dir;
- }
-
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci);
if (err) {
@@ -1870,9 +1736,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- debugfs_remove_recursive(mlxsw_pci->dbg_dir);
-err_dbg_create_dir:
- pci_disable_msix(mlxsw_pci->pdev);
+ pci_free_irq_vectors(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1892,8 +1756,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- debugfs_remove_recursive(mlxsw_pci->dbg_dir);
- pci_disable_msix(mlxsw_pci->pdev);
+ pci_free_irq_vectors(mlxsw_pci->pdev);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
@@ -1916,15 +1779,11 @@ EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
static int __init mlxsw_pci_module_init(void)
{
- mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
- if (!mlxsw_pci_dbg_root)
- return -ENOMEM;
return 0;
}
static void __exit mlxsw_pci_module_exit(void)
{
- debugfs_remove_recursive(mlxsw_pci_dbg_root);
}
module_init(mlxsw_pci_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 3d42146473b3..c580abba8d34 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,20 +49,12 @@
#define MLXSW_PORT_MID 0xd000
-#define MLXSW_PORT_MAX_PHY_PORTS 0x40
-#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
-
#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
-#define MLXSW_PORT_DEVID_BITS_OFFSET 10
-#define MLXSW_PORT_PHY_BITS_OFFSET 4
-#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
-
#define MLXSW_PORT_CPU_PORT 0x0
-#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
-#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+#define MLXSW_PORT_DONT_CARE 0xFF
#define MLXSW_PORT_MODULE_MAX_WIDTH 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index d9616daf8a70..83b277c8090e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4125,6 +4125,60 @@ MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
*/
MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
+/* Shared between ingress/egress */
+enum mlxsw_reg_ritr_counter_set_type {
+ /* No Count. */
+ MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT = 0x0,
+ /* Basic. Used for router interfaces, counting the following:
+ * - Error and Discard counters.
+ * - Unicast, Multicast and Broadcast counters. Sharing the
+ *   same set of counters for the different types of traffic
+ *   (IPv4, IPv6 and MPLS).
+ */
+ MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC = 0x9,
+};
+
+/* reg_ritr_ingress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24);
+
+/* reg_ritr_ingress_counter_set_type
+ * Ingress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_set_type, 0x38, 24, 8);
+
+/* reg_ritr_egress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_index, 0x3C, 0, 24);
+
+/* reg_ritr_egress_counter_set_type
+ * Egress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_set_type, 0x3C, 24, 8);
+
+static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
+ bool enable, bool egress)
+{
+ enum mlxsw_reg_ritr_counter_set_type set_type;
+
+ if (enable)
+ set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
+ else
+ set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
+ mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
+
+ if (egress)
+ mlxsw_reg_ritr_egress_counter_index_set(payload, index);
+ else
+ mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+}
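A hedged usage sketch; the RIF parameters ("rif", "vr_id", "mtu", "mac", "counter_index") are assumed context, and the payload is first packed with the extended mlxsw_reg_ritr_pack() shown below:

char ritr_pl[MLXSW_REG_RITR_LEN];
int err;

/* Sketch: bind "counter_index" as a basic egress counter of a RIF. */
mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_VLAN_IF, rif, vr_id,
		    mtu, mac);
mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, true, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);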
+
static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
{
MLXSW_REG_ZERO(ritr, payload);
@@ -4141,7 +4195,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
enum mlxsw_reg_ritr_if_type type,
- u16 rif, u16 mtu, const char *mac)
+ u16 rif, u16 vr_id, u16 mtu,
+ const char *mac)
{
bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
@@ -4153,6 +4208,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
@@ -4285,6 +4341,129 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
}
+/* RICNT - Router Interface Counter Register
+ * -----------------------------------------
+ * The RICNT register retrieves per router interface performance counters.
+ */
+#define MLXSW_REG_RICNT_ID 0x800B
+#define MLXSW_REG_RICNT_LEN 0x100
+
+MLXSW_REG_DEFINE(ricnt, MLXSW_REG_RICNT_ID, MLXSW_REG_RICNT_LEN);
+
+/* reg_ricnt_counter_index
+ * Counter index
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_index, 0x04, 0, 24);
+
+enum mlxsw_reg_ricnt_counter_set_type {
+ /* No Count. */
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Basic. Used for router interfaces, counting the following:
+ * - Error and Discard counters.
+ * - Unicast, Multicast and Broadcast counters. Sharing the
+ *   same set of counters for the different types of traffic
+ *   (IPv4, IPv6 and MPLS).
+ */
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC = 0x09,
+};
+
+/* reg_ricnt_counter_set_type
+ * Counter Set Type for router interface counter
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_set_type, 0x04, 24, 8);
+
+enum mlxsw_reg_ricnt_opcode {
+	/* Nop. Supported only for read access. */
+ MLXSW_REG_RICNT_OPCODE_NOP = 0x00,
+ /* Clear. Setting the clr bit will reset the counter value for
+ * all counters of the specified Router Interface.
+ */
+ MLXSW_REG_RICNT_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_ricnt_op
+ * Opcode
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, op, 0x00, 28, 4);
+
+/* reg_ricnt_good_unicast_packets
+ * good unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_packets, 0x08, 0, 64);
+
+/* reg_ricnt_good_multicast_packets
+ * good multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_packets, 0x10, 0, 64);
+
+/* reg_ricnt_good_broadcast_packets
+ * good broadcast packets
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_packets, 0x18, 0, 64);
+
+/* reg_ricnt_good_unicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good unicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_bytes, 0x20, 0, 64);
+
+/* reg_ricnt_good_multicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good multicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_bytes, 0x28, 0, 64);
+
+/* reg_ricnt_good_broadcast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good broadcast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_bytes, 0x30, 0, 64);
+
+/* reg_ricnt_error_packets
+ * A count of errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_packets, 0x38, 0, 64);
+
+/* reg_ricnt_discard_packets
+ * A count of non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_packets, 0x40, 0, 64);
+
+/* reg_ricnt_error_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for errored frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_bytes, 0x48, 0, 64);
+
+/* reg_ricnt_discard_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_bytes, 0x50, 0, 64);
+
+static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index,
+ enum mlxsw_reg_ricnt_opcode op)
+{
+ MLXSW_REG_ZERO(ricnt, payload);
+ mlxsw_reg_ricnt_op_set(payload, op);
+ mlxsw_reg_ricnt_counter_index_set(payload, index);
+ mlxsw_reg_ricnt_counter_set_type_set(payload,
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC);
+}
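A sketch of a read-only query through this register, mirroring the MGPC query flow used in spectrum.c further down; "counter_index" is assumed to have been bound to a RIF via RITR:

char ricnt_pl[MLXSW_REG_RICNT_LEN];
u64 packets;
int err;

mlxsw_reg_ricnt_pack(ricnt_pl, counter_index, MLXSW_REG_RICNT_OPCODE_NOP);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
if (err)
	return err;
/* Getters are generated by the MLXSW_ITEM64() definitions above. */
packets = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);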
+
/* RALTA - Router Algorithmic LPM Tree Allocation Register
* -------------------------------------------------------
* RALTA is used to allocate the LPM trees of the SHSPM method.
@@ -5504,6 +5683,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
mlxsw_reg_mpsc_rate_set(payload, rate);
}
+/* MGPC - Monitoring General Purpose Counter Set Register
+ * -------------------------------------------------------
+ * The MGPC register retrieves and sets the General Purpose Counter Set.
+ */
+#define MLXSW_REG_MGPC_ID 0x9081
+#define MLXSW_REG_MGPC_LEN 0x18
+
+MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
+
+enum mlxsw_reg_mgpc_counter_set_type {
+ /* No count */
+	MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_mgpc_counter_set_type
+ * Counter set type.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
+
+/* reg_mgpc_counter_index
+ * Counter index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
+
+enum mlxsw_reg_mgpc_opcode {
+ /* Nop */
+ MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
+ /* Clear counters */
+ MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_mgpc_opcode
+ * Opcode.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
+
+/* reg_mgpc_byte_counter
+ * Byte counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
+
+/* reg_mgpc_packet_counter
+ * Packet counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
+
+static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
+ enum mlxsw_reg_mgpc_opcode opcode,
+ enum mlxsw_reg_mgpc_counter_set_type set_type)
+{
+ MLXSW_REG_ZERO(mgpc, payload);
+ mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
+ mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
+ mlxsw_reg_mgpc_opcode_set(payload, opcode);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5960,6 +6203,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rgcr),
MLXSW_REG(ritr),
MLXSW_REG(ratr),
+ MLXSW_REG(ricnt),
MLXSW_REG(ralta),
MLXSW_REG(ralst),
MLXSW_REG(raltb),
@@ -5977,6 +6221,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
+ MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index bce8c2e00630..9556d934714b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,11 +43,15 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_COUNTER_POOL_SIZE,
MLXSW_RES_ID_MAX_SPAN,
+ MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+ MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
MLXSW_RES_ID_ACL_MAX_REGIONS,
@@ -59,6 +63,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
+ MLXSW_RES_ID_MAX_LPM_TREES,
/* Internal resources.
* Determined by the SW, not queried from the HW.
@@ -75,11 +80,15 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
+ [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
+ [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
[MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
@@ -91,6 +100,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+ [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
};
struct mlxsw_res {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16484f24b7db..88357cee7679 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -66,6 +66,8 @@
#include "port.h"
#include "trap.h"
#include "txheader.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
@@ -138,6 +140,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+ int err;
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+ if (err)
+ return err;
+ *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
+ *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
+ return 0;
+}
+
+static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+}
+
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index)
+{
+ int err;
+
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ p_counter_index);
+ if (err)
+ return err;
+ err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+ return 0;
+
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ counter_index);
+}
+
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
@@ -304,9 +360,10 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
return false;
}
-static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
{
- return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+ return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
@@ -319,8 +376,9 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
* updated according to the mtu value
*/
if (mlxsw_sp_span_is_egress_mirror(port)) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
- mlxsw_sp_span_mtu_to_buffsize(mtu));
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
if (err) {
netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
@@ -357,8 +415,10 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
- mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+ port->dev->mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
if (err) {
netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
@@ -745,19 +805,47 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
- bool pause_en, bool pfc_en, u16 delay)
+static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
{
- u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+ return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
- delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
- MLXSW_SP_PAUSE_DELAY;
+#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
+
+static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+ u16 delay)
+{
+ delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
+ BITS_PER_BYTE));
+ return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
+ mtu);
+}
+
+/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 58752
- if (pause_en || pfc_en)
- mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
- pg_size + delay, pg_size);
+static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+ u16 delay, bool pfc, bool pause)
+{
+ if (pfc)
+ return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
+ else if (pause)
+ return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
+ else
+ return 0;
+}
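One worked example, assuming the 96-byte Spectrum cell size (queried at init via the new CELL_SIZE resource, not hard-coded here):

/* mtu = 1500, delay = 32768 bits:
 *   bytes = DIV_ROUND_UP(32768, BITS_PER_BYTE)          = 4096
 *   cells = DIV_ROUND_UP(4096, 96)                      = 43
 *   total = 2 * 43 + DIV_ROUND_UP(1500, 96) = 86 + 16   = 102 cells
 */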
+
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
+ bool lossy)
+{
+ if (lossy)
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
else
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+ thres);
}
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
@@ -778,6 +866,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
bool configure = false;
bool pfc = false;
+ bool lossy;
+ u16 thres;
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
if (prio_tc[j] == i) {
@@ -789,7 +879,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
if (!configure)
continue;
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+
+ lossy = !(pfc || pause_en);
+ thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
+ pause_en);
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
@@ -966,8 +1061,9 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
-int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
- u16 vid_end, bool is_member, bool untagged)
+static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvm_pl;
@@ -984,6 +1080,26 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
return err;
}
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ u16 vid, vid_e;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
+ is_member, untagged);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
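
The exported mlxsw_sp_port_vlan_set() above now splits an arbitrary [vid_begin, vid_end] range into register-sized chunks before calling the SPVM helper. A minimal sketch of the chunking alone, with REC_MAX standing in for MLXSW_REG_SPVM_REC_MAX_COUNT (whose actual value is defined in reg.h):

    #include <stdio.h>

    #define REC_MAX 255 /* stand-in; the real value comes from reg.h */

    static void walk_vid_range(unsigned int begin, unsigned int end)
    {
        unsigned int vid, vid_e;

        for (vid = begin; vid <= end; vid += REC_MAX) {
            vid_e = vid + REC_MAX - 1 < end ? vid + REC_MAX - 1 : end;
            /* one SPVM write would cover [vid, vid_e] here */
            printf("chunk: %u..%u\n", vid, vid_e);
        }
    }

    int main(void)
    {
        walk_vid_range(2, 4094);    /* 17 chunks of at most 255 VIDs */
        return 0;
    }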
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
@@ -1368,7 +1484,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
tc->cls_mall);
return 0;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -1379,6 +1495,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
tc->cls_flower);
return 0;
+ case TC_CLSFLOWER_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
+ tc->cls_flower);
default:
return -EOPNOTSUPP;
}
@@ -1492,6 +1611,7 @@ err_port_pause_configure:
struct mlxsw_sp_port_hw_stats {
char str[ETH_GSTRING_LEN];
u64 (*getter)(const char *payload);
+ bool cells_bytes;
};
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
@@ -1612,17 +1732,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
-static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
-{
- u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
-
- return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
-}
-
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
{
.str = "tc_transmit_queue_tc",
- .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
+ .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
+ .cells_bytes = true,
},
{
.str = "tc_no_buffer_discard_uc_tc",
@@ -1734,6 +1848,8 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1743,8 +1859,13 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
if (err)
return;
mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
- for (i = 0; i < len; i++)
+ for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+ if (!hw_stats[i].cells_bytes)
+ continue;
+ data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
+ data[data_index + i]);
+ }
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
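
The new cells_bytes flag defers unit conversion to read time: getters return raw cell counts, and the ethtool path scales them by the cell size queried from the device rather than a compile-time constant. The shape of that pattern, reduced to its essentials (names here are illustrative, not the driver's):

    struct stat_desc {
        unsigned long long (*getter)(const char *payload);
        int cells_bytes;    /* raw value is in cells; scale on read */
    };

    static unsigned long long read_stat(const struct stat_desc *desc,
                                        const char *payload,
                                        unsigned int cell_size)
    {
        unsigned long long val = desc->getter(payload);

        return desc->cells_bytes ? val * cell_size : val;
    }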
@@ -2537,25 +2658,33 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
int i;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->port_to_module);
kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
u8 module, width, lane;
size_t alloc_size;
int i;
int err;
- alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+ alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_sp->ports)
return -ENOMEM;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+ if (!mlxsw_sp->port_to_module) {
+ err = -ENOMEM;
+ goto err_port_to_module_alloc;
+ }
+
+ for (i = 1; i < max_ports; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width, &lane);
if (err)
@@ -2575,6 +2704,8 @@ err_port_module_info_get:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->port_to_module);
+err_port_to_module_alloc:
kfree(mlxsw_sp->ports);
return err;
}
@@ -2877,6 +3008,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
/* L3 traps */
MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
@@ -3158,6 +3290,18 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
+
+static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
+}
+
+static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
+}
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3224,6 +3368,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
+ err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+ goto err_counter_pool_init;
+ }
+
+ err = mlxsw_sp_dpipe_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
+ goto err_dpipe_init;
+ }
+
+ err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
+ goto err_dummy_fid_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3233,6 +3395,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_dummy_fid_fini(mlxsw_sp);
+err_dummy_fid_init:
+ mlxsw_sp_dpipe_fini(mlxsw_sp);
+err_dpipe_init:
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
@@ -3255,6 +3423,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_dummy_fid_fini(mlxsw_sp);
+ mlxsw_sp_dpipe_fini(mlxsw_sp);
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
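
Note how the three new subsystems slot into the existing init/fini scaffolding: each init failure jumps to a label that unwinds only what was set up before it, and mlxsw_sp_fini() repeats the same calls in strict reverse order. The idiom in isolation, as a runnable toy:

    #include <stdio.h>

    static int a_init(void)  { return 0; }
    static void a_fini(void) { puts("a_fini"); }
    static int b_init(void)  { return -1; }    /* force the unwind path */
    static void b_fini(void) { puts("b_fini"); }

    static int subsys_init(void)
    {
        int err;

        err = a_init();
        if (err)
            return err;

        err = b_init();
        if (err)
            goto err_b_init;

        return 0;

    err_b_init:
        a_fini();       /* undo everything before the failure */
        return err;
    }

    static void subsys_fini(void)
    {
        b_fini();       /* strict reverse of init order */
        a_fini();
    }

    int main(void)
    {
        if (subsys_init())
            return 1;   /* a_fini already ran via the unwind */
        subsys_fini();
        return 0;
    }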
@@ -3326,13 +3497,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
-static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
+static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
- struct mlxsw_sp_port **port = data;
+ struct mlxsw_sp_port **p_mlxsw_sp_port = data;
int ret = 0;
if (mlxsw_sp_port_dev_check(lower_dev)) {
- *port = netdev_priv(lower_dev);
+ *p_mlxsw_sp_port = netdev_priv(lower_dev);
ret = 1;
}
@@ -3341,18 +3512,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
- struct mlxsw_sp_port *port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- port = NULL;
- netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);
+ mlxsw_sp_port = NULL;
+ netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
- return port;
+ return mlxsw_sp_port;
}
-static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3362,15 +3533,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
- struct mlxsw_sp_port *port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- port = NULL;
- netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);
+ mlxsw_sp_port = NULL;
+ netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
+ &mlxsw_sp_port);
- return port;
+ return mlxsw_sp_port;
}
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
@@ -3390,546 +3562,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
-static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
- unsigned long event)
-{
- switch (event) {
- case NETDEV_UP:
- if (!r)
- return true;
- r->ref_count++;
- return false;
- case NETDEV_DOWN:
- if (r && --r->ref_count == 0)
- return true;
- /* It is possible we already removed the RIF ourselves
- * if it was assigned to a netdev that is now a bridge
- * or LAG slave.
- */
- return false;
- }
-
- return false;
-}
-
-static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (!mlxsw_sp->rifs[i])
- return i;
-
- return MLXSW_SP_INVALID_RIF;
-}
-
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
- bool *p_lagged, u16 *p_system_port)
-{
- u8 local_port = mlxsw_sp_vport->local_port;
-
- *p_lagged = mlxsw_sp_vport->lagged;
- *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev, u16 rif,
- bool create)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- bool lagged = mlxsw_sp_vport->lagged;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u16 system_port;
-
- mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
- l3_dev->mtu, l3_dev->dev_addr);
-
- mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
- mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
- mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
-{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->leave = mlxsw_sp_vport_rif_sp_leave;
- f->ref_count = 0;
- f->dev = l3_dev;
- f->fid = fid;
-
- return f;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
-{
- struct mlxsw_sp_rif *r;
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (!r)
- return NULL;
-
- INIT_LIST_HEAD(&r->nexthop_list);
- INIT_LIST_HEAD(&r->neigh_list);
- ether_addr_copy(r->addr, l3_dev->dev_addr);
- r->mtu = l3_dev->mtu;
- r->ref_count = 1;
- r->dev = l3_dev;
- r->rif = rif;
- r->f = f;
-
- return r;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_fid *f;
- struct mlxsw_sp_rif *r;
- u16 fid, rif;
- int err;
-
- rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_INVALID_RIF)
- return ERR_PTR(-ERANGE);
-
- err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
- if (err)
- return ERR_PTR(err);
-
- fid = mlxsw_sp_rif_sp_to_fid(rif);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- f = mlxsw_sp_rfid_alloc(fid, l3_dev);
- if (!f) {
- err = -ENOMEM;
- goto err_rfid_alloc;
- }
-
- r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
- if (!r) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->r = r;
- mlxsw_sp->rifs[rif] = r;
-
- return r;
-
-err_rif_alloc:
- kfree(f);
-err_rfid_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-err_rif_fdb_op:
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_rif *r)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct net_device *l3_dev = r->dev;
- struct mlxsw_sp_fid *f = r->f;
- u16 fid = f->fid;
- u16 rif = r->rif;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
- mlxsw_sp->rifs[rif] = NULL;
- f->r = NULL;
-
- kfree(r);
-
- kfree(f);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
-}
-
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_rif *r;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
- if (!r) {
- r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
- if (IS_ERR(r))
- return PTR_ERR(r);
- }
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
- r->f->ref_count++;
-
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
-
- return 0;
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
-}
-
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
- struct net_device *port_dev,
- unsigned long event, u16 vid)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
-
- switch (event) {
- case NETDEV_UP:
- return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
- case NETDEV_DOWN:
- mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
- break;
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event)
-{
- if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
- return 0;
-
- return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
-}
-
-static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
- struct net_device *lag_dev,
- unsigned long event, u16 vid)
-{
- struct net_device *port_dev;
- struct list_head *iter;
- int err;
-
- netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
- if (mlxsw_sp_port_dev_check(port_dev)) {
- err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
- event, vid);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event)
-{
- if (netif_is_bridge_port(lag_dev))
- return 0;
-
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
-{
- u16 fid;
-
- if (is_vlan_dev(l3_dev))
- fid = vlan_dev_vlan_id(l3_dev);
- else if (mlxsw_sp->master_bridge.dev == l3_dev)
- fid = 1;
- else
- return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
- return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
- bool set)
-{
- enum mlxsw_flood_table_type table_type;
- char *sftr_pl;
- u16 index;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- table_type = mlxsw_sp_flood_table_type_get(fid);
- index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
- 1, MLXSW_PORT_ROUTER_PORT, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return MLXSW_REG_RITR_FID_IF;
- else
- return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- u16 fid, u16 rif,
- bool create)
-{
- enum mlxsw_reg_ritr_if_type rif_type;
- char ritr_pl[MLXSW_REG_RITR_LEN];
-
- rif_type = mlxsw_sp_rif_type_get(fid);
- mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
- l3_dev->dev_addr);
- mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
-{
- struct mlxsw_sp_rif *r;
- u16 rif;
- int err;
-
- rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_INVALID_RIF)
- return -ERANGE;
-
- err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
- if (err)
- return err;
-
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
- if (err)
- goto err_rif_bridge_op;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
- if (!r) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->r = r;
- mlxsw_sp->rifs[rif] = r;
-
- netdev_dbg(l3_dev, "RIF=%d created\n", rif);
-
- return 0;
-
-err_rif_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
- mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-err_rif_bridge_op:
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
- return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
-{
- struct net_device *l3_dev = r->dev;
- struct mlxsw_sp_fid *f = r->f;
- u16 rif = r->rif;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
- mlxsw_sp->rifs[rif] = NULL;
- f->r = NULL;
-
- kfree(r);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
- mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
- netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
-}
-
-static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- struct net_device *br_dev,
- unsigned long event)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
- struct mlxsw_sp_fid *f;
-
- /* FID can either be an actual FID if the L3 device is the
- * VLAN-aware bridge or a VLAN device on top. Otherwise, the
- * L3 device is a VLAN-unaware bridge and we get a vFID.
- */
- f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
- if (WARN_ON(!f))
- return -EINVAL;
-
- switch (event) {
- case NETDEV_UP:
- return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
- case NETDEV_DOWN:
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- break;
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
- unsigned long event)
-{
- struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
- vid);
- else if (netif_is_lag_master(real_dev))
- return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
- vid);
- else if (netif_is_bridge_master(real_dev) &&
- mlxsw_sp->master_bridge.dev == real_dev)
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
- event);
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
- struct net_device *dev = ifa->ifa_dev->dev;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *r;
- int err = 0;
-
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!mlxsw_sp_rif_should_config(r, event))
- goto out;
-
- if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_inetaddr_port_event(dev, event);
- else if (netif_is_lag_master(dev))
- err = mlxsw_sp_inetaddr_lag_event(dev, event);
- else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
- else if (is_vlan_dev(dev))
- err = mlxsw_sp_inetaddr_vlan_event(dev, event);
-
-out:
- return notifier_from_errno(err);
-}
-
-static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
- const char *mac, int mtu)
-{
- char ritr_pl[MLXSW_REG_RITR_LEN];
- int err;
-
- mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (err)
- return err;
-
- mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
- mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
- mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
-{
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *r;
- int err;
-
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- return 0;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!r)
- return 0;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
- if (err)
- return err;
-
- err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
- if (err)
- goto err_rif_edit;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- ether_addr_copy(r->addr, dev->dev_addr);
- r->mtu = dev->mtu;
-
- netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
-
- return 0;
-
-err_rif_fdb_op:
- mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
-err_rif_edit:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
- return err;
-}
-
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
u16 fid)
{
@@ -4220,7 +3852,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 lag_id)
+ struct net_device *lag_dev, u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
@@ -4238,6 +3870,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->lag_id = lag_id;
mlxsw_sp_vport->lagged = 1;
+ mlxsw_sp_vport->dev = lag_dev;
}
static void
@@ -4254,6 +3887,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
if (f)
f->leave(mlxsw_sp_vport);
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
mlxsw_sp_vport->lagged = 0;
}
@@ -4293,7 +3927,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
return 0;
@@ -4403,6 +4037,56 @@ static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
+static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
+ MLXSW_REG_SPMS_STATE_DISCARDING;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+
+ for (vid = 0; vid < VLAN_N_VID; vid++)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ true, false);
+ if (err)
+ goto err_port_vlan_set;
+ return 0;
+
+err_port_vlan_set:
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ return err;
+}
+
+static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ false, false);
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+}
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
unsigned long event, void *ptr)
{
@@ -4421,7 +4105,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
- !netif_is_bridge_master(upper_dev))
+ !netif_is_bridge_master(upper_dev) &&
+ !netif_is_ovs_master(upper_dev))
return -EINVAL;
if (!info->linking)
break;
@@ -4438,6 +4123,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
!netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
return -EINVAL;
+ if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
+ return -EINVAL;
+ if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4446,8 +4135,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
else
- mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
+ mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
@@ -4461,6 +4150,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
else
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
+ } else if (netif_is_ovs_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
+ else
+ mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4552,8 +4246,8 @@ static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *f;
f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (f && f->r)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f && f->rif)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
if (f && --f->ref_count == 0)
mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
@@ -4564,33 +4258,40 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
struct netdev_notifier_changeupper_info *info;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(br_dev);
if (!mlxsw_sp)
return 0;
- if (br_dev != mlxsw_sp->master_bridge.dev)
- return 0;
info = ptr;
switch (event) {
- case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev))
- break;
- if (info->linking) {
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- if (err)
- return err;
+ return -EINVAL;
+ if (is_vlan_dev(upper_dev) &&
+ br_dev != mlxsw_sp->master_bridge.dev)
+ return -EINVAL;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (is_vlan_dev(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ else
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
+ upper_dev);
} else {
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return 0;
+ return err;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
@@ -4657,8 +4358,8 @@ static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
clear_bit(vfid, mlxsw_sp->vfids.mapped);
list_del(&f->list);
- if (f->r)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f->rif)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
kfree(f);
@@ -4810,6 +4511,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport)
+ return 0;
switch (event) {
case NETDEV_PRECHANGEUPPER:
@@ -4821,22 +4524,24 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
/* We can't have multiple VLAN interfaces configured on
* the same port and being members in the same bridge.
*/
- if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
upper_dev))
return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (info->linking) {
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+ upper_dev);
+ else
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
} else {
- if (!mlxsw_sp_vport)
- return 0;
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ err = -EINVAL;
+ WARN_ON(1);
}
+ break;
}
return err;
@@ -4878,6 +4583,15 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
+ return false;
+ return netif_is_l3_master(info->upper_dev);
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -4886,6 +4600,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (mlxsw_sp_is_vrf_event(event, ptr))
+ err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
else if (netif_is_lag_master(dev))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 13ec85e7c392..0c23bc1e946d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -57,41 +57,21 @@
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
+#define MLXSW_SP_DUMMY_FID 15359
+
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
-#define MLXSW_SP_LPM_TREE_MAX 22
-#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
-#define MLXSW_SP_BYTES_PER_CELL 96
-
-#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
-#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
-
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
-/* Maximum delay buffer needed in case of PAUSE frames, in cells.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 612
-
-#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
-
-static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
-{
- delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
- return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
-}
-
struct mlxsw_sp_port;
+struct mlxsw_sp_rif;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -103,21 +83,10 @@ struct mlxsw_sp_fid {
struct list_head list;
unsigned int ref_count;
struct net_device *dev;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
u16 fid;
};
-struct mlxsw_sp_rif {
- struct list_head nexthop_list;
- struct list_head neigh_list;
- struct net_device *dev;
- unsigned int ref_count;
- struct mlxsw_sp_fid *f;
- unsigned char addr[ETH_ALEN];
- int mtu;
- u16 rif;
-};
-
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -138,17 +107,7 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
- return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
-}
-
-static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
-{
- return fid >= MLXSW_SP_RFID_BASE;
-}
-
-static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
-{
- return MLXSW_SP_RFID_BASE + rif;
+ return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}
struct mlxsw_sp_sb_pr {
@@ -177,12 +136,15 @@ struct mlxsw_sp_sb_pm {
#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8
+struct mlxsw_sp_sb_port {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
struct mlxsw_sp_sb {
struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
- struct {
- struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
- } ports[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_sp_sb_port *ports;
+ u32 cell_size;
};
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
@@ -207,11 +169,9 @@ struct mlxsw_sp_fib;
struct mlxsw_sp_vr {
u16 id; /* virtual router ID */
- bool used;
- enum mlxsw_sp_l3proto proto;
u32 tb_id; /* kernel fib table id */
- struct mlxsw_sp_lpm_tree *lpm_tree;
- struct mlxsw_sp_fib *fib;
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
};
enum mlxsw_sp_span_type {
@@ -253,12 +213,15 @@ struct mlxsw_sp_port_mall_tc_entry {
};
struct mlxsw_sp_router {
- struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct rhashtable nexthop_group_ht;
struct rhashtable nexthop_ht;
struct {
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
struct delayed_work dw;
unsigned long interval; /* ms */
} neighs_update;
@@ -269,6 +232,7 @@ struct mlxsw_sp_router {
};
struct mlxsw_sp_acl;
+struct mlxsw_sp_counter_pool;
struct mlxsw_sp {
struct {
@@ -296,7 +260,7 @@ struct mlxsw_sp {
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper *lags;
- u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+ u8 *port_to_module;
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
struct mlxsw_sp_acl *acl;
@@ -304,6 +268,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
+ struct mlxsw_sp_counter_pool *counter_pool;
struct {
struct mlxsw_sp_span_entry *entries;
int entries_count;
@@ -317,6 +282,18 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
return &mlxsw_sp->lags[lag_id];
}
+static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
+ u32 cells)
+{
+ return mlxsw_sp->sb.cell_size * cells;
+}
+
+static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
+ u32 bytes)
+{
+ return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
+}
+
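
These two inlines replace the removed MLXSW_SP_BYTES_TO_CELLS/CELLS_TO_BYTES macros. One subtlety worth noting: bytes-to-cells rounds up, so a round trip through both helpers can grow a byte count by up to cell_size - 1. A compile-and-run check, assuming a 96-byte cell:

    #include <assert.h>

    #define CELL 96 /* assumed cell size for illustration */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int bytes = 1500;
        unsigned int cells = DIV_ROUND_UP(bytes, CELL);

        assert(cells == 16);
        assert(cells * CELL == 1536);   /* 36 bytes of rounding slack */
        return 0;
    }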
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -386,6 +363,7 @@ struct mlxsw_sp_port {
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -497,19 +475,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-static inline struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
- return mlxsw_sp->rifs[i];
-
- return NULL;
-}
-
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BC,
@@ -570,8 +535,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -608,10 +571,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
+ u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
@@ -620,6 +589,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
+ unsigned int counter_index;
+ bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -639,6 +610,8 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
};
struct mlxsw_sp_acl_ops {
@@ -679,6 +652,14 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio);
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid);
struct mlxsw_sp_acl_rule;
@@ -698,6 +679,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use);
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -708,5 +692,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
__be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 8a18b3aa70dc..317f7b14627f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/tc_act/tc_vlan.h>
#include "reg.h"
#include "core.h"
@@ -49,10 +50,17 @@
#include "spectrum_acl_flex_keys.h"
struct mlxsw_sp_acl {
+ struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
+ struct list_head rules;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
+ } rule_activity_update;
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_rule {
struct rhash_head ht_node; /* Member of rule HT */
+ struct list_head list;
unsigned long cookie; /* HT key */
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule_info *rulei;
+ u64 last_used;
+ u64 last_packets;
+ u64 last_bytes;
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
+static int
+mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ rulei->counter_valid = false;
+ mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -335,6 +368,48 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio)
+{
+ u8 ethertype;
+
+ if (action == TCA_VLAN_ACT_MODIFY) {
+ switch (proto) {
+ case ETH_P_8021Q:
+ ethertype = 0;
+ break;
+ case ETH_P_8021AD:
+ ethertype = 1;
+ break;
+ default:
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
+ proto);
+ return -EINVAL;
+ }
+
+ return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
+ vid, prio, ethertype);
+ } else {
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
+ return -EINVAL;
+ }
+}
+
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_counter(rulei->act_block,
+ rulei->counter_index);
+}
+
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid)
+{
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
@@ -358,8 +433,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(rule->rulei);
goto err_rulei_create;
}
+
+ err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
+ if (err)
+ goto err_counter_alloc;
return rule;
+err_counter_alloc:
+ mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -372,6 +453,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -393,6 +475,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
return 0;
err_rhashtable_insert:
@@ -406,6 +489,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
@@ -426,6 +510,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
return rule->rulei;
}
+static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ bool active;
+ int err;
+
+ err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
+ if (err)
+ return err;
+ if (active)
+ rule->last_used = jiffies;
+ return 0;
+}
+
+static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
+{
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ list_for_each_entry(rule, &acl->rules, list) {
+ err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
+ rule);
+ if (err)
+ goto err_rule_update;
+ }
+ rtnl_unlock();
+ return 0;
+
+err_rule_update:
+ rtnl_unlock();
+ return err;
+}
+
+static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
+{
+ unsigned long interval = acl->rule_activity_update.interval;
+
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
+ rule_activity_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_acl_rules_activity_update(acl);
+ if (err)
+ dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");
+
+ mlxsw_sp_acl_rule_activity_work_schedule(acl);
+}
+
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets, &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets - rule->last_packets;
+ *bytes = current_bytes - rule->last_bytes;
+ *last_use = rule->last_used;
+
+ rule->last_bytes = current_bytes;
+ rule->last_packets = current_packets;
+
+ return 0;
+}
+
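
mlxsw_sp_acl_rule_get_stats() above reports deltas, not totals: each call returns traffic seen since the previous call and then rebases last_packets/last_bytes, which is exactly what the flower stats path wants. The core of that bookkeeping, with a worked sequence:

    #include <stdio.h>

    /* Each call reports traffic since the previous call, then rebases
     * the baseline, mirroring the last_packets/last_bytes handling.
     */
    static unsigned long long report_delta(unsigned long long hw_now,
                                           unsigned long long *last)
    {
        unsigned long long delta = hw_now - *last;

        *last = hw_now;
        return delta;
    }

    int main(void)
    {
        unsigned long long last = 0;

        /* hardware counter observed at three successive polls */
        printf("%llu\n", report_delta(100, &last));    /* 100 */
        printf("%llu\n", report_delta(250, &last));    /* 150 */
        printf("%llu\n", report_delta(250, &last));    /* 0 */
        return 0;
    }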
#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
@@ -434,7 +602,6 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
u32 kvdl_index;
- int ret;
int err;
/* The first action set of a TCAM entry is stored directly in TCAM,
@@ -443,10 +610,10 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
- ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE);
- if (ret < 0)
- return ret;
- kvdl_index = ret;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE,
+ &kvdl_index);
+ if (err)
+ return err;
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
@@ -475,13 +642,11 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
struct mlxsw_sp *mlxsw_sp = priv;
char ppbs_pl[MLXSW_REG_PPBS_LEN];
u32 kvdl_index;
- int ret;
int err;
- ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1);
- if (ret < 0)
- return ret;
- kvdl_index = ret;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ if (err)
+ return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
if (err)
@@ -518,7 +683,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
-
+ acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
mlxsw_sp_afk_blocks,
@@ -541,11 +706,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
goto err_acl_ops_init;
acl->ops = acl_ops;
+
+ /* Create the delayed work for rule activity updates */
+ INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
+ mlxsw_sp_acl_rul_activity_update_work);
+ acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
return 0;
err_acl_ops_init:
@@ -564,7 +736,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
+ cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
+ WARN_ON(!list_empty(&acl->rules));
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index 82b81cf7f4a7..af7b7bad48df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -39,11 +39,15 @@
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
@@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7382832215fa..3a24289979d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
+static int
+mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ region->tcam_region_info, offset);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
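
The query above uses MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, so reading the activity bit also resets it; combined with the periodic work in spectrum_acl.c, each poll therefore answers "was this rule hit since the last poll?" rather than "is it active right now?". A sketch of that polling contract, where query_clear() is a hypothetical stand-in for the register access:

    #include <time.h>

    static time_t last_used;

    /* Hypothetical stand-in for the PTCE2 query: returns the A-bit
     * and, like the clear-on-read op, resets it as a side effect.
     * Stubbed to always report a hit.
     */
    static int query_clear(int *active)
    {
        *active = 1;
        return 0;
    }

    static void poll_rule_activity(void)
    {
        int active;

        if (query_clear(&active))
            return;                     /* query failed; retry next period */
        if (active)
            last_used = time(NULL);     /* hit since the previous poll */
    }

    int main(void)
    {
        poll_rule_activity();
        return (int)(last_used == 0);
    }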
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
static int
@@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
+static int
+mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
+ entry->parman_item.index,
+ activity);
+}
+
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC,
@@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_DST_IP4,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
@@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}
+static int
+mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
+ activity);
+}
+
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
@@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+ .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
static const struct mlxsw_sp_acl_profile_ops *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index a7468262f118..997189cfe7fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -162,8 +162,8 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
static const u16 mlxsw_sp_pbs[] = {
- [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
- [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
+ [0] = 2 * ETH_FRAME_LEN,
+ [9] = 2 * MLXSW_PORT_MAX_MTU,
};
#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
@@ -171,20 +171,22 @@ static const u16 mlxsw_sp_pbs[] = {
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
int i;
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+ u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
+
if (i == MLXSW_SP_PB_UNUSED)
continue;
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
}
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
- MLXSW_REG(pbmc), pbmc_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -209,11 +211,25 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
-#define MLXSW_SP_SB_PR_INGRESS_SIZE \
- (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
+ GFP_KERNEL);
+ if (!mlxsw_sp->sb.ports)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->sb.ports);
+}
+
+#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
-#define MLXSW_SP_SB_PR_EGRESS_SIZE \
- (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000
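
The new pool sizes look like magic numbers, but they are the removed formulas evaluated at the former compile-time port maximum; the arithmetic is consistent with MLXSW_PORT_MAX_PORTS having been 64. They have to be constants now, since the port count is only known at runtime via mlxsw_core_max_ports():

    #include <assert.h>

    int main(void)
    {
        /* ingress: 15000000 - (2 * 20000 * ports) */
        assert(15000000 - (2 * 20000 * 64) == 12440000);
        /* egress: 14000000 - (8 * 1500 * ports) */
        assert(14000000 - (8 * 1500 * 64) == 13232000);
        return 0;
    }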
#define MLXSW_SP_SB_PR(_mode, _size) \
{ \
@@ -223,18 +239,17 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+ MLXSW_SP_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
+ MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};
#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
@@ -251,11 +266,9 @@ static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
int err;
for (i = 0; i < prs_len; i++) {
- const struct mlxsw_sp_sb_pr *pr;
+ u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
- pr = &prs[i];
- err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
- pr->mode, pr->size);
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
if (err)
return err;
}
@@ -284,7 +297,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
}
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+ MLXSW_SP_SB_CM(10000, 8, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
@@ -293,20 +306,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
+ MLXSW_SP_SB_CM(20000, 1, 3),
};
#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
@@ -330,7 +343,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
+ MLXSW_SP_SB_CM(10000, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -370,13 +383,17 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
+ u32 min_buff;
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
+ /* All pools are initialized using dynamic thresholds,
+ * therefore 'max_buff' isn't specified in cells.
+ */
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
- cm->min_buff, cm->max_buff,
- cm->pool);
+ min_buff, cm->max_buff, cm->pool);
if (err)
return err;
}
@@ -484,21 +501,21 @@ struct mlxsw_sp_sb_mm {
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
};
#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -511,10 +528,15 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
const struct mlxsw_sp_sb_mm *mc;
+ u32 min_buff;
mc = &mlxsw_sp_sb_mms[i];
- mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
- mc->max_buff, mc->pool);
+ /* All pools are initialized using dynamic thresholds,
+ * therefore 'max_buff' isn't specified in cells.
+ */
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
+ mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
+ mc->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
return err;
@@ -522,32 +544,53 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
-
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
+ u64 sb_size;
int err;
- err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
+ return -EIO;
+ mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ return -EIO;
+ sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
+
+ err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
return err;
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+ if (err)
+ goto err_sb_prs_init;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
- return err;
+ goto err_sb_cpu_port_sb_cms_init;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
- return err;
- return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
- MLXSW_SP_SB_SIZE,
- MLXSW_SP_SB_POOL_COUNT,
- MLXSW_SP_SB_POOL_COUNT,
- MLXSW_SP_SB_TC_COUNT,
- MLXSW_SP_SB_TC_COUNT);
+ goto err_sb_mms_init;
+ err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_TC_COUNT,
+ MLXSW_SP_SB_TC_COUNT);
+ if (err)
+ goto err_devlink_sb_register;
+
+ return 0;
+
+err_devlink_sb_register:
+err_sb_mms_init:
+err_sb_cpu_port_sb_cms_init:
+err_sb_prs_init:
+ mlxsw_sp_sb_ports_fini(mlxsw_sp);
+ return err;
}
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ mlxsw_sp_sb_ports_fini(mlxsw_sp);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -596,7 +639,7 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
pool_info->pool_type = (enum devlink_sb_pool_type) dir;
- pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+ pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -606,9 +649,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
enum devlink_sb_threshold_type threshold_type)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
enum mlxsw_reg_sbpr_mode mode;
if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
@@ -627,7 +670,7 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
- return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+ return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -645,7 +688,7 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
return -EINVAL;
*p_max_buff = val;
} else {
- *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+ *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
}
return 0;
}
@@ -761,7 +804,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
masked_count = 0;
for (local_port = cb_ctx.local_port_1;
- local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
@@ -775,7 +818,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
}
masked_count = 0;
for (local_port = cb_ctx.local_port_1;
- local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
@@ -817,7 +860,7 @@ next_batch:
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
}
- for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
@@ -847,7 +890,7 @@ do_query:
cb_priv);
if (err)
goto out;
- if (local_port < MLXSW_PORT_MAX_PORTS)
+ if (local_port < mlxsw_core_max_ports(mlxsw_core))
goto next_batch;
out:
@@ -882,7 +925,7 @@ next_batch:
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
}
- for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
@@ -908,7 +951,7 @@ do_query:
&bulk_list, NULL, 0);
if (err)
goto out;
- if (local_port < MLXSW_PORT_MAX_PORTS)
+ if (local_port < mlxsw_core_max_ports(mlxsw_core))
goto next_batch;
out:
@@ -932,8 +975,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool, dir);
- *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
- *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+ *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
+ *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
return 0;
}
@@ -951,7 +994,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
- *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
- *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+ *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
+ *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
return 0;
}
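The hunks above retire the compile-time MLXSW_SP_BYTES_TO_CELLS()/MLXSW_SP_CELLS_TO_BYTES() macros in favour of runtime conversion against the cell size queried from the device and cached in mlxsw_sp->sb.cell_size (see the mlxsw_sp_buffers_init() hunk). A minimal sketch of what such helpers look like, assuming the out-of-diff definitions take this shape; rounding up on the bytes-to-cells path keeps a byte quota from ever being undersized:

/* Illustrative sketch only: runtime byte<->cell conversion of the
 * kind the hunks above call into. DIV_ROUND_UP avoids truncating a
 * quota that is not a whole multiple of the cell size.
 */
static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
				       u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
}

static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
				       u32 cells)
{
	return cells * mlxsw_sp->sb.cell_size;
}

Assuming the 96-byte cell the removed macros hard-coded, the 1500-byte egress quotas above become DIV_ROUND_UP(1500, 96) = 16 cells; hardware reporting a different CELL_SIZE now gets the right cell count from the same byte figures, with no recompile.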
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
new file mode 100644
index 000000000000..0f46775e0307
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum_cnt.h"
+
+#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
+
+struct mlxsw_sp_counter_sub_pool {
+ unsigned int base_index;
+ unsigned int size;
+ unsigned int entry_size;
+ unsigned int bank_count;
+};
+
+struct mlxsw_sp_counter_pool {
+ unsigned int pool_size;
+ unsigned long *usage; /* Usage bitmap */
+ struct mlxsw_sp_counter_sub_pool *sub_pools;
+};
+
+static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+ [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+ .bank_count = 6,
+ },
+ [MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+ .bank_count = 2,
+ }
+};
+
+static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int total_bank_config = 0;
+ unsigned int pool_size;
+ int i;
+
+ pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+ /* Check the config is valid: no bank oversubscription */
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
+ total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+ if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
+ return -EINVAL;
+ return 0;
+}
+
+static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+
+ /* Prepare generic flow pool */
+ sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
+ return -EIO;
+ sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ COUNTER_SIZE_PACKETS_BYTES);
+ /* Prepare erif pool */
+ sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
+ return -EIO;
+ sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ COUNTER_SIZE_ROUTER_BASIC);
+ return 0;
+}
+
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ struct mlxsw_sp_counter_pool *pool;
+ unsigned int base_index;
+ unsigned int map_size;
+ int i;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
+ return -EIO;
+
+ err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
+ if (err)
+ return err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+ map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
+
+ pool->usage = kzalloc(map_size, GFP_KERNEL);
+ if (!pool->usage) {
+ err = -ENOMEM;
+ goto err_usage_alloc;
+ }
+
+ pool->sub_pools = mlxsw_sp_counter_sub_pools;
+ /* Allocation is based on the bank count, which should be
+ * specified statically for each sub-pool.
+ */
+ base_index = 0;
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ sub_pool = &pool->sub_pools[i];
+ sub_pool->size = sub_pool->bank_count *
+ MLXSW_SP_COUNTER_POOL_BANK_SIZE;
+ sub_pool->base_index = base_index;
+ base_index += sub_pool->size;
+ /* The last bank can't be fully used */
+ if (sub_pool->base_index + sub_pool->size > pool->pool_size)
+ sub_pool->size = pool->pool_size - sub_pool->base_index;
+ }
+
+ mlxsw_sp->counter_pool = pool;
+ return 0;
+
+err_usage_alloc:
+ kfree(pool);
+ return err;
+}
+
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+
+ WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+ pool->pool_size);
+ kfree(pool->usage);
+ kfree(pool);
+}
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int entry_index;
+ unsigned int stop_index;
+ int i;
+
+ sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ stop_index = sub_pool->base_index + sub_pool->size;
+ entry_index = sub_pool->base_index;
+
+ entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
+ if (entry_index == stop_index)
+ return -ENOBUFS;
+ /* The sub-pools can contain a non-integer number of entries,
+ * so we must check for overflow.
+ */
+ if (entry_index + sub_pool->entry_size > stop_index)
+ return -ENOBUFS;
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __set_bit(entry_index + i, pool->usage);
+
+ *p_counter_index = entry_index;
+ return 0;
+}
+
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
+
+ if (WARN_ON(counter_index >= pool->pool_size))
+ return;
+ sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __clear_bit(counter_index + i, pool->usage);
+}
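The allocator above reserves entry_size consecutive bits in the usage bitmap, starting the search at the sub-pool's base index, and the free path clears them again. A hypothetical caller, mirroring how the RIF counter code later in this diff consumes the API (the function name here is illustrative):

/* Illustrative consumer of the counter pool API; the real caller is
 * mlxsw_sp_rif_counter_alloc()/_free() in spectrum_router.c below.
 */
static int example_rif_counter_use(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int counter_index;
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     &counter_index);
	if (err)
		return err;	/* -ENOBUFS once the sub-pool is exhausted */

	/* ... point a hardware object at counter_index ... */

	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      counter_index);
	return 0;
}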
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644
index 000000000000..fd34d0a01073
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -0,0 +1,54 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_CNT_H
+#define _MLXSW_SPECTRUM_CNT_H
+
+#include "spectrum.h"
+
+enum mlxsw_sp_counter_sub_pool_id {
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ MLXSW_SP_COUNTER_SUB_POOL_RIF,
+};
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index);
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index);
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
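To make the bank arithmetic in mlxsw_sp_counter_pool_init() concrete, here is a worked layout under an assumed COUNTER_POOL_SIZE of 32768 entries; the real value comes from the device's resource query:

/* Assumed pool of 32768 entries = 8 banks of
 * MLXSW_SP_COUNTER_POOL_BANK_SIZE (4096). Sub-pools are carved out
 * back to back in enum order:
 *
 *   FLOW: bank_count = 6  ->  base_index = 0,     size = 24576
 *   RIF:  bank_count = 2  ->  base_index = 24576, size = 8192
 *
 * The validate step accepts this split (6 + 2 banks <= 32768/4096 + 1),
 * and a trailing sub-pool is trimmed when its end would overrun the
 * pool, since the last bank can't always be fully used.
 */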
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
new file mode 100644
index 000000000000..ea56f6ade6b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -0,0 +1,351 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+enum mlxsw_sp_field_metadata_id {
+ MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+};
+
+static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = {
+ { .name = "erif_port",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ .bitwidth = 32,
+ .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+ },
+ { .name = "l3_forward",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ .bitwidth = 1,
+ },
+ { .name = "l3_drop",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ .bitwidth = 1,
+ },
+};
+
+enum mlxsw_sp_dpipe_header_id {
+ MLXSW_SP_DPIPE_HEADER_METADATA,
+};
+
+static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = {
+ .name = "mlxsw_meta",
+ .id = MLXSW_SP_DPIPE_HEADER_METADATA,
+ .fields = mlxsw_sp_dpipe_fields_metadata,
+ .fields_count = ARRAY_SIZE(mlxsw_sp_dpipe_fields_metadata),
+};
+
+static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = {
+ &mlxsw_sp_dpipe_header_metadata,
+};
+
+static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = {
+ .headers = mlxsw_dpipe_headers,
+ .headers_count = ARRAY_SIZE(mlxsw_dpipe_headers),
+};
+
+static int mlxsw_sp_dpipe_table_erif_actions_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+ int err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+ err = devlink_dpipe_action_put(skb, &action);
+ if (err)
+ return err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
+static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_match match = {0};
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static void mlxsw_sp_erif_entry_clear(struct devlink_dpipe_entry *entry)
+{
+ unsigned int value_count, value_index;
+ struct devlink_dpipe_value *value;
+
+ value = entry->action_values;
+ value_count = entry->action_values_count;
+ for (value_index = 0; value_index < value_count; value_index++) {
+ kfree(value[value_index].value);
+ kfree(value[value_index].mask);
+ }
+
+ value = entry->match_values;
+ value_count = entry->match_values_count;
+ for (value_index = 0; value_index < value_count; value_index++) {
+ kfree(value[value_index].value);
+ kfree(value[value_index].mask);
+ }
+}
+
+static void
+mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match,
+ struct devlink_dpipe_action *action)
+{
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &mlxsw_sp_dpipe_header_metadata;
+ action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int mlxsw_sp_erif_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_value,
+ struct devlink_dpipe_match *match,
+ struct devlink_dpipe_value *action_value,
+ struct devlink_dpipe_action *action)
+{
+ entry->match_values = match_value;
+ entry->match_values_count = 1;
+
+ entry->action_values = action_value;
+ entry->action_values_count = 1;
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u32);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ goto err_action_alloc;
+ return 0;
+
+err_action_alloc:
+ kfree(match_value->value);
+ return -ENOMEM;
+}
+
+static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_rif *rif,
+ bool counters_enabled)
+{
+ u32 *action_value;
+ u32 *rif_value;
+ u64 cnt;
+ int err;
+
+ /* Set Match RIF index */
+ rif_value = entry->match_values->value;
+ *rif_value = mlxsw_sp_rif_index(rif);
+ entry->match_values->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ entry->match_values->mapping_valid = true;
+
+ /* Set Action Forwarding */
+ action_value = entry->action_values->value;
+ *action_value = 1;
+
+ entry->counter_valid = false;
+ entry->counter = 0;
+ if (!counters_enabled)
+ return 0;
+
+ entry->index = mlxsw_sp_rif_index(rif);
+ err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &cnt);
+ if (!err) {
+ entry->counter = cnt;
+ entry->counter_valid = true;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+ struct devlink_dpipe_action action = {0};
+ struct devlink_dpipe_match match = {0};
+ struct devlink_dpipe_entry entry = {0};
+ struct mlxsw_sp *mlxsw_sp = priv;
+ unsigned int rif_count;
+ int i, j;
+ int err;
+
+ mlxsw_sp_erif_match_action_prepare(&match, &action);
+ err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
+ &action_value, &action);
+ if (err)
+ return err;
+
+ rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ rtnl_lock();
+ i = 0;
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
+ for (; i < rif_count; i++) {
+ if (!mlxsw_sp->rifs[i])
+ continue;
+ err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
+ mlxsw_sp->rifs[i],
+ counters_enabled);
+ if (err)
+ goto err_entry_get;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ break;
+ }
+ goto err_entry_append;
+ }
+ j++;
+ }
+
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (i != rif_count)
+ goto start_again;
+ rtnl_unlock();
+
+ mlxsw_sp_erif_entry_clear(&entry);
+ return 0;
+err_entry_append:
+err_entry_get:
+err_ctx_prepare:
+ rtnl_unlock();
+ mlxsw_sp_erif_entry_clear(&entry);
+ return err;
+}
+
+static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int i;
+
+ rtnl_lock();
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ if (!mlxsw_sp->rifs[i])
+ continue;
+ if (enable)
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp,
+ mlxsw_sp->rifs[i],
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ else
+ mlxsw_sp_rif_counter_free(mlxsw_sp,
+ mlxsw_sp->rifs[i],
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
+ .entries_dump = mlxsw_sp_table_erif_entries_dump,
+ .counters_set_update = mlxsw_sp_table_erif_counters_update,
+};
+
+static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ u64 table_size;
+
+ table_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ return devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, table_size,
+ false);
+}
+
+static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+}
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = devlink_dpipe_headers_register(devlink,
+ &mlxsw_sp_dpipe_headers);
+ if (err)
+ return err;
+ err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
+ if (err)
+ goto err_erif_register;
+ return 0;
+
+err_erif_register:
+ devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ return err;
+}
+
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
+ devlink_dpipe_headers_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
new file mode 100644
index 000000000000..d2089298cba3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PIPELINE_H_
+#define _MLXSW_PIPELINE_H_
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp);
+
+#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
+
+#endif /* _MLXSW_PIPELINE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ae6cccc666e4..7d87e23578a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -39,6 +39,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "spectrum.h"
#include "core_acl_flex_keys.h"
@@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (tc_no_actions(exts))
return 0;
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+ if (err)
+ return err;
+
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
@@ -65,6 +71,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
+ err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
+ MLXSW_SP_DUMMY_FID);
+ if (err)
+ return err;
+
out_dev = __dev_get_by_index(dev_net(dev), ifindex);
if (out_dev == dev)
out_dev = NULL;
@@ -73,6 +84,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
out_dev);
if (err)
return err;
+ } else if (is_tcf_vlan(a)) {
+ u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
+ u32 action = tcf_vlan_action(a);
+ u8 prio = tcf_vlan_push_prio(a);
+ u16 vid = tcf_vlan_push_vid(a);
+
+ return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ action, vid,
+ proto, prio);
} else {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
@@ -173,7 +193,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
return -EOPNOTSUPP;
}
@@ -234,6 +255,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
sizeof(key->src));
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VID,
+ key->vlan_id,
+ mask->vlan_id);
+ if (mask->vlan_priority != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_PCP,
+ key->vlan_priority,
+ mask->vlan_priority);
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
mlxsw_sp_flower_parse_ipv4(rulei, f);
@@ -314,3 +356,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ int err;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
+ ingress,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (WARN_ON(IS_ERR(ruleset)))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+ if (!rule)
+ return -EINVAL;
+
+ err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
+ &lastuse);
+ if (err)
+ goto err_rule_get_stats;
+
+ preempt_disable();
+
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+
+ preempt_enable();
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return 0;
+
+err_rule_get_stats:
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return err;
+}
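Nothing in this file shows how mlxsw_sp_flower_stats() is reached: the dispatch sits in the driver's TC offload hook in spectrum.c, which this diff does not touch. A sketch of the assumed shape (mlxsw_sp_flower_replace() is likewise assumed from the existing API; only mlxsw_sp_flower_destroy() is visible above, and the function name here is illustrative):

/* Assumed dispatch -- the real hook lives in spectrum.c, outside this
 * diff. TC_CLSFLOWER_STATS is what exercises the new function.
 */
static int example_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool ingress,
				       struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
					       cls_flower);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, cls_flower);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
					     cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

The preempt_disable()/preempt_enable() pair around tcf_action_stats_update() is there because the action counters are per-CPU and must be updated without migrating off the current CPU.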
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index ac321e8e5c1a..26c26cd30c3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -45,7 +45,8 @@
(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
#define MLXSW_SP_CHUNK_MAX 32
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
+ u32 *p_entry_index)
{
int entry_index;
int size;
@@ -72,7 +73,8 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
for (i = 0; i < type_entries; i++)
set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
- return entry_index;
+ *p_entry_index = entry_index;
+ return 0;
}
return -ENOBUFS;
}
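This converts mlxsw_sp_kvdl_alloc() from returning the allocated index (or a negative errno) to the conventional 0/-errno style, with the index handed back through p_entry_index; the nexthop-group hunk in spectrum_router.c below updates the caller accordingly. A sketch of the new calling convention (the function name is illustrative, and mlxsw_sp_kvdl_free() is assumed from the pre-existing API):

/* Illustrative caller of the reworked allocator. */
static int example_kvdl_user(struct mlxsw_sp *mlxsw_sp, u16 ecmp_size)
{
	u32 adj_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err)
		return err;	/* -ENOBUFS when the linear area is full */

	/* ... write ecmp_size adjacency entries at adj_index ... */

	mlxsw_sp_kvdl_free(mlxsw_sp, adj_index);
	return 0;
}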
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index bd8de6b9be71..33cec1cc1642 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -41,14 +41,184 @@
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
+#include <net/fib_rules.h>
+#include <net/l3mdev.h>
#include "spectrum.h"
#include "core.h"
#include "reg.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+struct mlxsw_sp_rif {
+ struct list_head nexthop_list;
+ struct list_head neigh_list;
+ struct net_device *dev;
+ struct mlxsw_sp_fid *f;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif_index;
+ u16 vr_id;
+ unsigned int counter_ingress;
+ bool counter_ingress_valid;
+ unsigned int counter_egress;
+ bool counter_egress_valid;
+};
+
+static unsigned int *
+mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ return &rif->counter_egress;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ return &rif->counter_ingress;
+ }
+ return NULL;
+}
+
+static bool
+mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ return rif->counter_egress_valid;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ return rif->counter_ingress_valid;
+ }
+ return false;
+}
+
+static void
+mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ bool valid)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ rif->counter_egress_valid = valid;
+ break;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ rif->counter_ingress_valid = valid;
+ break;
+ }
+}
+
+static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ unsigned int counter_index, bool enable,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ bool is_egress = false;
+ int err;
+
+ if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
+ is_egress = true;
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
+ is_egress);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ bool valid;
+ int err;
+
+ valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
+ if (!valid)
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_NOP);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+ *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
+ return 0;
+}
+
+static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+}
+
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ unsigned int *p_counter_index;
+ int err;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ p_counter_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+
+ err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+ *p_counter_index, true, dir);
+ if (err)
+ goto err_counter_edit;
+ mlxsw_sp_rif_counter_valid_set(rif, dir, true);
+ return 0;
+
+err_counter_edit:
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ unsigned int *p_counter_index;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (WARN_ON(!p_counter_index))
+ return;
+ mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+ *p_counter_index, false, dir);
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ *p_counter_index);
+ mlxsw_sp_rif_counter_valid_set(rif, dir, false);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -89,12 +259,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
}
static void
-mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- memset(prefix_usage, 0, sizeof(*prefix_usage));
-}
-
-static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
unsigned char prefix_len)
{
@@ -125,7 +289,7 @@ struct mlxsw_sp_fib_node {
struct list_head entry_list;
struct list_head list;
struct rhash_head ht_node;
- struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_fib_key key;
};
@@ -149,13 +313,17 @@ struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
+ enum mlxsw_sp_l3proto proto;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_fib *fib;
int err;
@@ -167,6 +335,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
if (err)
goto err_rhashtable_init;
INIT_LIST_HEAD(&fib->node_list);
+ fib->proto = proto;
+ fib->vr = vr;
return fib;
err_rhashtable_init:
@@ -177,24 +347,21 @@ err_rhashtable_init:
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
WARN_ON(!list_empty(&fib->node_list));
+ WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
-mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
- lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->ref_count == 0) {
- if (one_reserved)
- one_reserved = false;
- else
- return lpm_tree;
- }
+ for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ if (lpm_tree->ref_count == 0)
+ return lpm_tree;
}
return NULL;
}
@@ -248,12 +415,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
@@ -283,13 +450,13 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
- lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm.trees[i];
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
@@ -297,7 +464,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
goto inc_ref_count;
}
lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
- proto, one_reserved);
+ proto);
if (IS_ERR(lpm_tree))
return lpm_tree;
@@ -314,15 +481,41 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+
+static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
+ u64 max_trees;
int i;
- for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
- lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
+ return -EIO;
+
+ max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
+ mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+ mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
+ sizeof(struct mlxsw_sp_lpm_tree),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.lpm.trees)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm.trees[i];
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.lpm.trees);
+}
+
+static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
+{
+ return !!vr->fib4;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -332,31 +525,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
return NULL;
}
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto,
- vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -369,8 +562,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
struct mlxsw_sp_vr *vr;
int i;
@@ -379,69 +571,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
return NULL;
}
+static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return vr->fib4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage;
- struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_vr *vr;
- int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
- vr->fib = mlxsw_sp_fib_create();
- if (IS_ERR(vr->fib))
- return ERR_CAST(vr->fib);
-
- vr->proto = proto;
+ vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr->fib4))
+ return ERR_CAST(vr->fib4);
vr->tb_id = tb_id;
- mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- proto, true);
- if (IS_ERR(lpm_tree)) {
- err = PTR_ERR(lpm_tree);
- goto err_tree_get;
- }
- vr->lpm_tree = lpm_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
- if (err)
- goto err_tree_bind;
-
- vr->used = true;
return vr;
-
-err_tree_bind:
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
-err_tree_get:
- mlxsw_sp_fib_destroy(vr->fib);
-
- return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
- mlxsw_sp_fib_destroy(vr->fib);
- vr->used = false;
+ mlxsw_sp_fib_destroy(vr->fib4);
+ vr->fib4 = NULL;
}
static int
-mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
- struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
struct mlxsw_sp_lpm_tree *new_tree;
int err;
@@ -449,7 +622,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
- vr->proto, false);
+ fib->proto);
if (IS_ERR(new_tree)) {
/* We failed to get a tree according to the required
* prefix usage. However, the current tree might be still good
@@ -463,8 +636,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
}
/* Prevent packet loss by overwriting existing binding */
- vr->lpm_tree = new_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ fib->lpm_tree = new_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
if (err)
goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +645,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
err_tree_bind:
- vr->lpm_tree = lpm_tree;
+ fib->lpm_tree = lpm_tree;
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
return err;
}
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
struct mlxsw_sp_vr *vr;
- int err;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
- if (!vr) {
- vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
- if (IS_ERR(vr))
- return vr;
- } else {
- struct mlxsw_sp_prefix_usage req_prefix_usage;
-
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &vr->fib->prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- /* Need to replace LPM tree in case new prefix is required. */
- err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &req_prefix_usage);
- if (err)
- return ERR_PTR(err);
- }
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
- /* Destroy virtual router entity in case the associated FIB is empty
- * and allow it to be used for other tables in future. Otherwise,
- * check if some prefix usage did not disappear and change tree if
- * that is the case. Note that in case new, smaller tree cannot be
- * allocated, the original one will be kept being used.
- */
- if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
- mlxsw_sp_vr_destroy(mlxsw_sp, vr);
- else
- mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &vr->fib->prefix_usage);
+ if (!vr->rif_count && list_empty(&vr->fib4->node_list))
+ mlxsw_sp_vr_destroy(vr);
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -627,14 +773,14 @@ static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
int err;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
- if (!r)
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+ if (!rif)
return ERR_PTR(-EINVAL);
- neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
+ neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
if (!neigh_entry)
return ERR_PTR(-ENOMEM);
@@ -642,7 +788,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
if (err)
goto err_neigh_entry_insert;
- list_add(&neigh_entry->rif_list_node, &r->neigh_list);
+ list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
return neigh_entry;
@@ -1050,22 +1196,22 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif *r)
+ const struct mlxsw_sp_rif *rif)
{
char rauht_pl[MLXSW_REG_RAUHT_LEN];
mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
- r->rif, r->addr);
+ rif->rif_index, rif->addr);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
+ struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
- mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
- list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
+ mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
+ list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
rif_list_node)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
@@ -1082,7 +1228,7 @@ struct mlxsw_sp_nexthop {
*/
struct rhash_head ht_node;
struct mlxsw_sp_nexthop_key key;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
u8 should_offload:1, /* set indicates this neigh is connected and
* should be put to KVD linear area of this group.
*/
@@ -1109,7 +1255,7 @@ struct mlxsw_sp_nexthop_group {
u16 ecmp_size;
u16 count;
struct mlxsw_sp_nexthop nexthops[0];
-#define nh_rif nexthops[0].r
+#define nh_rif nexthops[0].rif
};
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
@@ -1171,7 +1317,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr,
+ const struct mlxsw_sp_fib *fib,
u32 adj_index, u16 ecmp_size,
u32 new_adj_index,
u16 new_ecmp_size)
@@ -1179,8 +1325,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
char raleu_pl[MLXSW_REG_RALEU_LEN];
mlxsw_reg_raleu_pack(raleu_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
- adj_index, ecmp_size, new_adj_index,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->vr->id, adj_index, ecmp_size, new_adj_index,
new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1190,14 +1336,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
u32 old_adj_index, u16 old_ecmp_size)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr = NULL;
+ struct mlxsw_sp_fib *fib = NULL;
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (vr == fib_entry->fib_node->vr)
+ if (fib == fib_entry->fib_node->fib)
continue;
- vr = fib_entry->fib_node->vr;
- err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ fib = fib_entry->fib_node->fib;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
old_adj_index,
old_ecmp_size,
nh_grp->adj_index,
@@ -1280,7 +1426,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
bool old_adj_index_valid;
u32 old_adj_index;
u16 old_ecmp_size;
- int ret;
int i;
int err;
@@ -1318,15 +1463,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
*/
goto set_trap;
- ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
- if (ret < 0) {
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+ if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
*/
dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
goto set_trap;
}
- adj_index = ret;
old_adj_index_valid = nh_grp->adj_index_valid;
old_adj_index = nh_grp->adj_index;
old_ecmp_size = nh_grp->ecmp_size;
@@ -1399,22 +1543,22 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
- struct mlxsw_sp_rif *r)
+ struct mlxsw_sp_rif *rif)
{
- if (nh->r)
+ if (nh->rif)
return;
- nh->r = r;
- list_add(&nh->rif_list_node, &r->nexthop_list);
+ nh->rif = rif;
+ list_add(&nh->rif_list_node, &rif->nexthop_list);
}
static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
- if (!nh->r)
+ if (!nh->rif)
return;
list_del(&nh->rif_list_node);
- nh->r = NULL;
+ nh->rif = NULL;
}
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
@@ -1505,7 +1649,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = fib_nh->nh_dev;
struct in_device *in_dev;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
int err;
nh->nh_grp = nh_grp;
@@ -1514,15 +1658,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ if (!dev)
+ return 0;
+
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
fib_nh->nh_flags & RTNH_F_LINKDOWN)
return 0;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!r)
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
return 0;
- mlxsw_sp_nexthop_rif_init(nh, r);
+ mlxsw_sp_nexthop_rif_init(nh, rif);
err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
if (err)
@@ -1548,7 +1695,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_key key;
struct mlxsw_sp_nexthop *nh;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
if (mlxsw_sp->router.aborted)
return;
@@ -1558,13 +1705,13 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON_ONCE(!nh))
return;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
- if (!r)
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
+ if (!rif)
return;
switch (event) {
case FIB_EVENT_NH_ADD:
- mlxsw_sp_nexthop_rif_init(nh, r);
+ mlxsw_sp_nexthop_rif_init(nh, rif);
mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
break;
case FIB_EVENT_NH_DEL:
@@ -1577,11 +1724,11 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
+ struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_nexthop *nh, *tmp;
- list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
+ list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_rif_fini(nh);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
@@ -1699,7 +1846,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
fib_entry->offloaded = true;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_inc(fib_entry->nh_group->key.fi);
break;
@@ -1711,7 +1858,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_dec(fib_entry->nh_group->key.fi);
break;
@@ -1751,8 +1898,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -1772,8 +1919,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
@@ -1784,27 +1931,28 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+ struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
enum mlxsw_reg_ralue_trap_action trap_action;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
u16 trap_id = 0;
- u16 rif = 0;
+ u16 rif_index = 0;
if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- rif = r->rif;
+ rif_index = rif->rif_index;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1812,13 +1960,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1993,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = -EINVAL;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
break;
@@ -1877,17 +2025,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
{
struct fib_info *fi = fen_info->fi;
- if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
+ switch (fen_info->type) {
+ case RTN_BROADCAST: /* fall through */
+ case RTN_LOCAL:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
- }
- if (fen_info->type != RTN_UNICAST)
- return -EINVAL;
- if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ case RTN_UNREACHABLE: /* fall through */
+ case RTN_BLACKHOLE: /* fall through */
+ case RTN_PROHIBIT:
+ /* Packets hitting these routes need to be trapped, but
+ * can do so with a lower priority than packets directed
+ * at the host, so use action type local instead of trap.
+ */
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- else
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return 0;
+ return 0;
+ case RTN_UNICAST:
+ if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
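/* In summary, the switch above maps kernel route types onto hardware
 * actions roughly as follows (derived from the code, for readability):
 *
 *   RTN_BROADCAST, RTN_LOCAL                 -> TYPE_TRAP   (to host)
 *   RTN_UNREACHABLE, RTN_BLACKHOLE,
 *   RTN_PROHIBIT                             -> TYPE_LOCAL  (lower-prio trap)
 *   RTN_UNICAST, nexthop at link scope       -> TYPE_REMOTE (forward in HW)
 *   RTN_UNICAST, any other nexthop scope     -> TYPE_LOCAL
 *   anything else                            -> -EINVAL     (not offloaded)
 */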
static struct mlxsw_sp_fib_entry *
@@ -1996,7 +2156,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
}
static struct mlxsw_sp_fib_node *
-mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
{
struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +2166,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
return NULL;
INIT_LIST_HEAD(&fib_node->entry_list);
- list_add(&fib_node->list, &vr->fib->node_list);
+ list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
- mlxsw_sp_fib_node_insert(vr->fib, fib_node);
- fib_node->vr = vr;
return fib_node;
}
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
- mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
list_del(&fib_node->list);
WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
@@ -2034,7 +2191,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2200,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
+static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ err = mlxsw_sp_fib_node_insert(fib, fib_node);
+ if (err)
+ return err;
+ fib_node->fib = fib;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+
+ if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+ err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
+ &req_prefix_usage);
+ if (err)
+ goto err_tree_check;
+ } else {
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return PTR_ERR(lpm_tree);
+ fib->lpm_tree = lpm_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
+ if (err)
+ goto err_tree_bind;
+ }
+
+ mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+ return 0;
+
+err_tree_bind:
+ fib->lpm_tree = NULL;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_tree_check:
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+ return err;
+}
+
+static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
+
+ mlxsw_sp_fib_node_prefix_dec(fib_node);
+
+ if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ fib->lpm_tree = NULL;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ } else {
+ mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
+ }
+
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+}
+
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
- MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
if (IS_ERR(vr))
return ERR_CAST(vr);
+ fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
- fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (fib_node)
return fib_node;
- fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (!fib_node) {
@@ -2076,22 +2299,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
goto err_fib_node_create;
}
+ err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+ if (err)
+ goto err_fib_node_init;
+
return fib_node;
+err_fib_node_init:
+ mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_vr *vr = fib_node->vr;
+ struct mlxsw_sp_vr *vr = fib_node->fib->vr;
if (!list_empty(&fib_node->entry_list))
return;
+ mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
}
static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2466,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_fib4_node_entry_add;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib4_node_entry_add:
@@ -2251,7 +2479,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
mlxsw_sp_fib4_node_list_remove(fib_entry);
}
@@ -2340,9 +2567,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
char ralst_pl[MLXSW_REG_RALST_LEN];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
- char ralue_pl[MLXSW_REG_RALUE_LEN];
- int err;
+ int i, err;
mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
MLXSW_SP_LPM_TREE_MIN);
@@ -2355,16 +2580,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
- MLXSW_SP_LPM_TREE_MIN);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
- if (err)
- return err;
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ if (!mlxsw_sp_vr_is_used(vr))
+ continue;
+
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ raltb_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
+ 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ ralue_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -2390,7 +2632,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- switch (fib_node->vr->proto) {
+ switch (fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
break;
@@ -2400,26 +2642,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
struct mlxsw_sp_fib_node *fib_node, *tmp;
- struct mlxsw_sp_vr *vr;
+
+ list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+ bool do_break = &tmp->list == &fib->node_list;
+
+ mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+ if (do_break)
+ break;
+ }
+}
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
continue;
-
- list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
- list) {
- bool do_break = &tmp->list == &vr->fib->node_list;
-
- mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
- if (do_break)
- break;
- }
+ mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
}
}
@@ -2437,74 +2685,11 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
-static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
-{
- char ritr_pl[MLXSW_REG_RITR_LEN];
- int err;
-
- mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (WARN_ON_ONCE(err))
- return err;
-
- mlxsw_reg_ritr_enable_set(ritr_pl, false);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
-{
- mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
- mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
- mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
-}
-
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
- u64 max_rifs;
- int err;
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
- return -EIO;
-
- max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
- GFP_KERNEL);
- if (!mlxsw_sp->rifs)
- return -ENOMEM;
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- if (err)
- goto err_rgcr_fail;
-
- return 0;
-
-err_rgcr_fail:
- kfree(mlxsw_sp->rifs);
- return err;
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
- int i;
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
- kfree(mlxsw_sp->rifs);
-}
-
struct mlxsw_sp_fib_event_work {
struct work_struct work;
union {
struct fib_entry_notifier_info fen_info;
+ struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
};
struct mlxsw_sp *mlxsw_sp;
@@ -2516,6 +2701,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ struct fib_rule *rule;
bool replace, append;
int err;
@@ -2539,7 +2725,10 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ rule = fib_work->fr_info.rule;
+ if (!fib4_rule_default(rule) && !rule->l3mdev)
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ fib_rule_put(rule);
break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
@@ -2582,6 +2771,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+ fib_rule_get(fib_work->fr_info.rule);
+ break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
@@ -2594,6 +2788,707 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
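/* The notifier above runs in atomic context, so it only snapshots the
 * event payload and takes references (fib_info_hold(), fib_rule_get())
 * before deferring the real work to process context. A stripped-down
 * sketch of that defer-with-reference pattern, with hypothetical names:
 */
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct fib_work_sketch {
        struct work_struct work;
        unsigned long event;
        /* a copy of the notifier payload would live here */
};

static void fib_work_fn_sketch(struct work_struct *work)
{
        struct fib_work_sketch *w =
                container_of(work, struct fib_work_sketch, work);

        rtnl_lock();
        /* program the device, then drop the references taken below */
        rtnl_unlock();
        kfree(w);
}

static int fib_event_sketch(unsigned long event, void *ptr)
{
        struct fib_work_sketch *w;

        w = kzalloc(sizeof(*w), GFP_ATOMIC);    /* atomic notifier context */
        if (!w)
                return NOTIFY_BAD;
        INIT_WORK(&w->work, fib_work_fn_sketch);
        w->event = event;
        /* copy *ptr and grab references on anything it points at here */
        schedule_work(&w->work);
        return NOTIFY_DONE;
}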
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+ return mlxsw_sp->rifs[i];
+
+ return NULL;
+}
+
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ mlxsw_reg_ritr_enable_set(ritr_pl, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
+ mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
+ const struct in_device *in_dev,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ if (!rif)
+ return true;
+ return false;
+ case NETDEV_DOWN:
+ if (rif && !in_dev->ifa_list &&
+ !netif_is_l3_slave(rif->dev))
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (!mlxsw_sp->rifs[i])
+ return i;
+
+ return MLXSW_SP_INVALID_INDEX_RIF;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+ bool *p_lagged, u16 *p_system_port)
+{
+ u8 local_port = mlxsw_sp_vport->local_port;
+
+ *p_lagged = mlxsw_sp_vport->lagged;
+ *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+ u16 vr_id, struct net_device *l3_dev,
+ u16 rif_index, bool create)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ bool lagged = mlxsw_sp_vport->lagged;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 system_port;
+
+ mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
+ vr_id, l3_dev->mtu, l3_dev->dev_addr);
+
+ mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+ mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
+{
+ return MLXSW_SP_RFID_BASE + rif_index;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->leave = mlxsw_sp_vport_rif_sp_leave;
+ f->ref_count = 0;
+ f->dev = l3_dev;
+ f->fid = fid;
+
+ return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+ if (!rif)
+ return NULL;
+
+ INIT_LIST_HEAD(&rif->nexthop_list);
+ INIT_LIST_HEAD(&rif->neigh_list);
+ ether_addr_copy(rif->addr, l3_dev->dev_addr);
+ rif->mtu = l3_dev->mtu;
+ rif->vr_id = vr_id;
+ rif->dev = l3_dev;
+ rif->rif_index = rif_index;
+ rif->f = f;
+
+ return rif;
+}
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
+{
+ return rif->rif_index;
+}
+
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
+{
+ return rif->dev->ifindex;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ u32 tb_id = l3mdev_fib_table(l3_dev);
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_rif *rif;
+ u16 fid, rif_index;
+ int err;
+
+ rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+ return ERR_PTR(-ERANGE);
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
+ rif_index, true);
+ if (err)
+ goto err_vport_rif_sp_op;
+
+ fid = mlxsw_sp_rif_sp_to_fid(rif_index);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_rfid_alloc;
+ }
+
+ rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ if (!rif) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
+ err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ netdev_dbg(mlxsw_sp_vport->dev,
+ "Counter alloc Failed err=%d\n", err);
+ }
+
+ f->rif = rif;
+ mlxsw_sp->rifs[rif_index] = rif;
+ vr->rif_count++;
+
+ return rif;
+
+err_rif_alloc:
+ kfree(f);
+err_rfid_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+ false);
+err_vport_rif_sp_op:
+ mlxsw_sp_vr_put(vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+ struct net_device *l3_dev = rif->dev;
+ struct mlxsw_sp_fid *f = rif->f;
+ u16 rif_index = rif->rif_index;
+ u16 fid = f->fid;
+
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+
+ vr->rif_count--;
+ mlxsw_sp->rifs[rif_index] = NULL;
+ f->rif = NULL;
+
+ kfree(rif);
+
+ kfree(f);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+ false);
+ mlxsw_sp_vr_put(vr);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif) {
+ rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+ }
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
+ rif->f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ case NETDEV_DOWN:
+ mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(port_dev) ||
+ netif_is_lag_port(port_dev) ||
+ netif_is_ovs_port(port_dev))
+ return 0;
+
+ return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+ event, vid);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ u16 fid;
+
+ if (is_vlan_dev(l3_dev))
+ fid = vlan_dev_vlan_id(l3_dev);
+ else if (mlxsw_sp->master_bridge.dev == l3_dev)
+ fid = 1;
+ else
+ return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+ return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
+ 1, router_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return MLXSW_REG_RITR_FID_IF;
+ else
+ return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
+ struct net_device *l3_dev,
+ u16 fid, u16 rif,
+ bool create)
+{
+ enum mlxsw_reg_ritr_if_type rif_type;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_type = mlxsw_sp_rif_type_get(fid);
+ mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
+ l3_dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ u32 tb_id = l3mdev_fib_table(l3_dev);
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_vr *vr;
+ u16 rif_index;
+ int err;
+
+ rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+ return -ERANGE;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+ if (err)
+ goto err_port_flood_set;
+
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
+ rif_index, true);
+ if (err)
+ goto err_rif_bridge_op;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ if (!rif) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->rif = rif;
+ mlxsw_sp->rifs[rif_index] = rif;
+ vr->rif_count++;
+
+ netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
+
+ return 0;
+
+err_rif_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+ false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+err_port_flood_set:
+ mlxsw_sp_vr_put(vr);
+ return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+ struct net_device *l3_dev = rif->dev;
+ struct mlxsw_sp_fid *f = rif->f;
+ u16 rif_index = rif->rif_index;
+
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+ vr->rif_count--;
+ mlxsw_sp->rifs[rif_index] = NULL;
+ f->rif = NULL;
+
+ kfree(rif);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+ false);
+
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+ mlxsw_sp_vr_put(vr);
+
+ netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+ struct net_device *br_dev,
+ unsigned long event)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ struct mlxsw_sp_fid *f;
+
+ /* FID can either be an actual FID if the L3 device is the
+ * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+ * L3 device is a VLAN-unaware bridge and we get a vFID.
+ */
+ f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+ if (WARN_ON(!f))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_bridge_master(real_dev) &&
+ mlxsw_sp->master_bridge.dev == real_dev)
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+ event);
+
+ return 0;
+}
+
+static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
+ unsigned long event)
+{
+ if (mlxsw_sp_port_dev_check(dev))
+ return mlxsw_sp_inetaddr_port_event(dev, event);
+ else if (netif_is_lag_master(dev))
+ return mlxsw_sp_inetaddr_lag_event(dev, event);
+ else if (netif_is_bridge_master(dev))
+ return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ else if (is_vlan_dev(dev))
+ return mlxsw_sp_inetaddr_vlan_event(dev, event);
+ else
+ return 0;
+}
+
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(dev, event);
+out:
+ return notifier_from_errno(err);
+}
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const char *mac, int mtu)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
+ dev->mtu);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ ether_addr_copy(rif->addr, dev->dev_addr);
+ rif->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
+err_rif_edit:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+ return err;
+}
+
+static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ /* If netdev is already associated with a RIF, then we need to
+ * destroy it and create a new one with the new virtual router ID.
+ */
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (rif)
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+
+ return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
+}
+
+static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif)
+ return;
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+}
+
+int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ int err = 0;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ return 0;
+ case NETDEV_CHANGEUPPER:
+ if (info->linking)
+ err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
+ else
+ mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
+ break;
+ }
+
+ return err;
+}
+
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
@@ -2606,6 +3501,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
mlxsw_sp_router_fib_flush(mlxsw_sp);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ u64 max_rifs;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
@@ -2625,7 +3562,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_nexthop_group_ht_init;
- mlxsw_sp_lpm_init(mlxsw_sp);
+ err = mlxsw_sp_lpm_init(mlxsw_sp);
+ if (err)
+ goto err_lpm_init;
+
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
goto err_vrs_init;
@@ -2647,6 +3587,8 @@ err_register_fib_notifier:
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
+ mlxsw_sp_lpm_fini(mlxsw_sp);
+err_lpm_init:
rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
@@ -2660,6 +3602,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(&mlxsw_sp->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
+ mlxsw_sp_lpm_fini(mlxsw_sp);
rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
__mlxsw_sp_router_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
new file mode 100644
index 000000000000..c3095fef6697
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -0,0 +1,58 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ROUTER_H_
+#define _MLXSW_ROUTER_H_
+
+#include "spectrum.h"
+
+enum mlxsw_sp_rif_counter_dir {
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+};
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ u64 *cnt);
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir);
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir);
+
+#endif /* _MLXSW_ROUTER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 598727d578c1..0d8411f1f954 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -568,8 +568,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
list_del(&f->list);
- if (f->r)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f->rif)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
kfree(f);
@@ -745,27 +745,6 @@ err_port_allow_untagged_set:
return err;
}
-static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool is_member,
- bool untagged)
-{
- u16 vid, vid_e;
- int err;
-
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
- vid_end);
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
- is_member, untagged);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
@@ -804,8 +783,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
- true, flag_untagged);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, flag_untagged);
if (err) {
netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
vid_end);
@@ -863,8 +842,8 @@ err_port_vid_learning_set:
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
- __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
err_port_vlans_set:
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
return err;
@@ -1012,7 +991,7 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
if (clear_all_ports) {
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
if (mlxsw_sp->ports[i])
mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
}
@@ -1171,8 +1150,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
if (pvid >= vid_begin && pvid <= vid_end)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index ec1e886d4566..3b0f72455681 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1321,7 +1321,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
int i;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
@@ -1329,17 +1329,18 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
size_t alloc_size;
u8 module, width;
int i;
int err;
- alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+ alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_sx->ports)
return -ENOMEM;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ for (i = 1; i < max_ports; i++) {
err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
&width);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 02ea48b15eb5..e008fdbed20f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -55,6 +55,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+ MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 279ee4612981..20358f87de57 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -212,25 +212,6 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
}
/**
- * ks8851_rx_1msg - select whether to use one or two messages for spi read
- * @ks: The device structure
- *
- * Return whether to generate a single message with a tx and rx buffer
- * supplied to spi_sync(), or alternatively send the tx and rx buffers
- * as separate messages.
- *
- * Depending on the hardware in use, a single message may be more efficient
- * on interrupts or work done by the driver.
- *
- * This currently always returns true until we add some per-device data passed
- * from the platform code to specify which mode is better.
- */
-static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
-{
- return true;
-}
-
-/**
* ks8851_rdreg - issue read register command and return the data
* @ks: The device state
* @op: The register address and byte enables in message format.
@@ -251,14 +232,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (ks8851_rx_1msg(ks)) {
- msg = &ks->spi_msg1;
- xfer = &ks->spi_xfer1;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = trx;
- xfer->len = rxl + 2;
- } else {
+ if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
msg = &ks->spi_msg2;
xfer = ks->spi_xfer2;
@@ -270,15 +244,22 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
xfer->tx_buf = NULL;
xfer->rx_buf = trx;
xfer->len = rxl;
+ } else {
+ msg = &ks->spi_msg1;
+ xfer = &ks->spi_xfer1;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = rxl + 2;
}
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (ks8851_rx_1msg(ks))
- memcpy(rxb, trx + 2, rxl);
- else
+ else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
+ else
+ memcpy(rxb, trx + 2, rxl);
}
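/* On a half-duplex SPI master the command and the response cannot share
 * a single transfer, which is why the read above is split into two
 * chained transfers. A sketch of that message layout, assuming the same
 * 2-byte command word; the function name is illustrative only:
 */
#include <linux/spi/spi.h>

static void ks_rdreg_half_duplex_sketch(struct spi_device *spi,
                                        const void *txb, void *rxb,
                                        unsigned int rxl)
{
        struct spi_transfer xfer[2] = {};
        struct spi_message msg;

        spi_message_init(&msg);
        xfer[0].tx_buf = txb;           /* command word only */
        xfer[0].len = 2;
        spi_message_add_tail(&xfer[0], &msg);
        xfer[1].rx_buf = rxb;           /* register data comes back alone */
        xfer[1].len = rxl;
        spi_message_add_tail(&xfer[1], &msg);
        if (spi_sync(spi, &msg) < 0)
                dev_err(&spi->dev, "read: spi_sync() failed\n");
}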
/**
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 6ad44be08b33..c0d7d5eec7e7 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -228,8 +228,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
net_dbg_ratelimited("packet error\n");
- priv->stats.rx_dropped++;
- priv->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
goto rx_next;
}
@@ -245,8 +245,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(!skb)) {
net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
- priv->stats.rx_dropped++;
- priv->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
goto rx_next;
}
@@ -256,10 +256,10 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(&priv->napi, skb);
rx++;
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
if (desc0 & RX_DESC0_MULTICAST)
- priv->stats.multicast++;
+ ndev->stats.multicast++;
rx_next:
wmb(); /* prevent setting ownership back too early */
@@ -296,8 +296,8 @@ static void moxart_tx_finished(struct net_device *ndev)
dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
priv->tx_skb[tx_tail] = NULL;
@@ -349,7 +349,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
goto out_unlock;
}
rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
@@ -400,13 +400,6 @@ out_unlock:
return ret;
}
-static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
-{
- struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-
- return &priv->stats;
-}
-
static void moxart_mac_setmulticast(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -456,7 +449,6 @@ static const struct net_device_ops moxart_netdev_ops = {
.ndo_open = moxart_mac_open,
.ndo_stop = moxart_mac_stop,
.ndo_start_xmit = moxart_mac_start_xmit,
- .ndo_get_stats = moxart_mac_get_stats,
.ndo_set_rx_mode = moxart_mac_set_rx_mode,
.ndo_set_mac_address = moxart_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index afc32ec998c0..686b8957d5cf 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -293,7 +293,6 @@
struct moxart_mac_priv_t {
void __iomem *base;
- struct net_device_stats stats;
unsigned int reg_maccr;
unsigned int reg_imr;
struct napi_struct napi;
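/* With the private stats copy and the .ndo_get_stats hook gone, the
 * core's dev_get_stats() falls back to the counters embedded in struct
 * net_device itself, so the driver just bumps those directly. A tiny
 * illustrative helper (not part of the driver):
 */
#include <linux/netdevice.h>

static void moxart_count_rx_sketch(struct net_device *ndev, unsigned int len)
{
        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += len;
}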
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 6933afa69df2..4b15f0f496aa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -6,8 +6,10 @@ nfp-objs := \
nfpcore/nfp_cpplib.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
+ nfpcore/nfp_mutex.o \
nfpcore/nfp_nffw.o \
nfpcore/nfp_nsp.o \
+ nfpcore/nfp_nsp_cmds.o \
nfpcore/nfp_nsp_eth.o \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
index 335beb8b8b45..97a8f00674d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -798,7 +798,7 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
@@ -818,7 +818,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
@@ -847,7 +847,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (swap) {
areg ^= breg;
@@ -1132,7 +1132,7 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return 0;
}
@@ -1143,7 +1143,7 @@ static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (meta->insn.off != offsetof(struct xdp_md, data) &&
meta->insn.off != offsetof(struct xdp_md, data_end))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
@@ -1174,12 +1174,12 @@ static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (meta->insn.off == offsetof(struct sk_buff, mark))
return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1192,7 +1192,7 @@ static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
return 0;
@@ -1206,7 +1206,7 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
@@ -1245,7 +1245,7 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!imm) {
meta->skip = true;
@@ -1276,7 +1276,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
@@ -1302,7 +1302,7 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
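
The ENOTSUPP to EOPNOTSUPP sweep above is a userspace-visibility fix rather than a behavioural one: ENOTSUPP (524) is a kernel-internal NFSv3 value with no libc string, so an offload failure surfaced to userspace prints as "Unknown error 524", while EOPNOTSUPP (95) renders as "Operation not supported". A hedged sketch of the preferred form for any error the eBPF JIT can bubble up to a user (function name hypothetical):

	#include <linux/errno.h>

	/* Sketch: use the UAPI-visible errno for instructions the
	 * hardware JIT cannot translate; never leak ENOTSUPP, which
	 * exists only for in-kernel NFS use.
	 */
	static int nfp_jit_unsupported(void)
	{
		return -EOPNOTSUPP;	/* strerror(95): "Operation not supported" */
	}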
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index dedac720fb29..dde35dae35c5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -48,7 +48,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
-#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
@@ -253,6 +253,7 @@ exit_release_fw:
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
+ struct nfp_nsp_identify *nspi;
struct nfp_nsp *nsp;
int err;
@@ -269,6 +270,12 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ nspi = __nfp_nsp_identify(nsp);
+ if (nspi) {
+ dev_info(&pdev->dev, "BSP: %s\n", nspi->version);
+ kfree(nspi);
+ }
+
err = nfp_fw_load(pdev, pf, nsp);
if (err < 0) {
kfree(pf->eth_tbl);
@@ -385,8 +392,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
- if (!list_empty(&pf->ports))
- nfp_net_pci_remove(pf);
+ nfp_net_pci_remove(pf);
nfp_pcie_sriov_disable(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 39105d0435e9..b57de047b002 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -42,7 +42,9 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/workqueue.h>
struct dentry;
struct pci_dev;
@@ -64,8 +66,11 @@ struct nfp_eth_table;
* @fw_loaded: Is the firmware loaded?
* @eth_tbl: NSP ETH table
* @ddir: Per-device debugfs directory
- * @num_ports: Number of adapter ports
+ * @num_ports: Number of adapter ports the app firmware supports
+ * @num_netdevs: Number of netdevs spawned
* @ports: Linked list of port structures (struct nfp_net)
+ * @port_lock: Protects @ports, @num_ports, @num_netdevs
+ * @port_refresh_work: Work entry for tearing down netdevs
*/
struct nfp_pf {
struct pci_dev *pdev;
@@ -88,7 +93,11 @@ struct nfp_pf {
struct dentry *ddir;
unsigned int num_ports;
+ unsigned int num_netdevs;
+
struct list_head ports;
+ struct work_struct port_refresh_work;
+ struct mutex port_lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
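
With netdevs now spawned and torn down at runtime (note @num_netdevs and @port_refresh_work above), walks of the PF port list need real serialization, which is what @port_lock provides. A sketch of the expected usage, assuming walkers take the mutex around list traversal (loop body hypothetical):

	struct nfp_net *nn;

	mutex_lock(&pf->port_lock);
	list_for_each_entry(nn, &pf->ports, port_list) {
		/* ... per-port work, safe against the refresh worker
		 * adding or removing entries concurrently ...
		 */
	}
	mutex_unlock(&pf->port_lock);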
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e614a376b595..fcf81b3be830 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -50,14 +50,14 @@
#include "nfp_net_ctrl.h"
-#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args)
-#define nn_warn_ratelimit(nn, fmt, args...) \
+#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
+#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
+#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
+#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_dp_warn(dp, fmt, args...) \
do { \
if (unlikely(net_ratelimit())) \
- netdev_warn((nn)->netdev, fmt, ## args); \
+ netdev_warn((dp)->netdev, fmt, ## args); \
} while (0)
/* Max time to wait for NFP to respond on updates (in seconds) */
@@ -112,6 +112,7 @@
/* Forward declarations */
struct nfp_cpp;
+struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
@@ -200,6 +201,7 @@ struct nfp_net_tx_buf {
* @txds: Virtual address of TX ring in host memory
* @dma: DMA address of the TX ring
* @size: Size, in bytes, of the TX ring (needed to free)
+ * @is_xdp: Is this an XDP TX ring?
*/
struct nfp_net_tx_ring {
struct nfp_net_r_vector *r_vec;
@@ -220,6 +222,7 @@ struct nfp_net_tx_ring {
dma_addr_t dma;
unsigned int size;
+ bool is_xdp;
} ____cacheline_aligned;
/* RX and freelist descriptor format */
@@ -283,6 +286,12 @@ struct nfp_net_rx_desc {
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+struct nfp_meta_parsed {
+ u32 hash_type;
+ u32 hash;
+ u32 mark;
+};
+
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
@@ -306,17 +315,13 @@ struct nfp_net_rx_buf {
* @rd_p: FL/RX ring read pointer (free running)
* @idx: Ring index from Linux's perspective
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
- * @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue
* @qcp_fl: Pointer to base of the QCP freelist queue
- * @qcp_rx: Pointer to base of the QCP RX queue
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for free list batching)
* @rxbufs: Array of FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @bufsz: Buffer allocation size for convenience of management routines
- * (NOTE: this is in second cache line, do not use on fast path!)
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -325,20 +330,17 @@ struct nfp_net_rx_ring {
u32 wr_p;
u32 rd_p;
- u16 idx;
- u16 wr_ptr_add;
+ u32 idx;
+ u32 wr_ptr_add;
int fl_qcidx;
- int rx_qcidx;
u8 __iomem *qcp_fl;
- u8 __iomem *qcp_rx;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
dma_addr_t dma;
unsigned int size;
- unsigned int bufsz;
} ____cacheline_aligned;
/**
@@ -433,19 +435,76 @@ struct nfp_stat_pair {
};
/**
- * struct nfp_net - NFP network device structure
- * @pdev: Backpointer to PCI device
- * @netdev: Backpointer to net_device structure
- * @is_vf: Is the driver attached to a VF?
+ * struct nfp_net_dp - NFP network device datapath data structure
+ * @dev: Backpointer to struct device
+ * @netdev: Backpointer to net_device structure
+ * @is_vf: Is the driver attached to a VF?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
- * @ctrl: Local copy of the control register/word.
- * @fl_bufsz: Currently configured size of the freelist buffers
+ * @chained_metadata_format: Firmware will use new metadata format
+ * @rx_dma_dir: Mapping direction for RX buffers
+ * @rx_dma_off: Offset at which packets are DMA mapped (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
+ * @ctrl: Local copy of the control register/word.
+ * @fl_bufsz: Currently configured size of the freelist buffers
* @xdp_prog: Installed XDP program
- * @fw_ver: Firmware version
+ * @tx_rings: Array of pre-allocated TX ring structures
+ * @rx_rings: Array of pre-allocated RX ring structures
+ * @ctrl_bar: Pointer to mapped control BAR
+ *
+ * @txd_cnt: Size of the TX ring in number of descriptors
+ * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @num_r_vecs: Number of used ring vectors
+ * @num_tx_rings: Currently configured number of TX rings
+ * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
+ * @num_rx_rings: Currently configured number of RX rings
+ * @mtu: Device MTU
+ */
+struct nfp_net_dp {
+ struct device *dev;
+ struct net_device *netdev;
+
+ u8 is_vf:1;
+ u8 bpf_offload_skip_sw:1;
+ u8 bpf_offload_xdp:1;
+ u8 chained_metadata_format:1;
+
+ u8 rx_dma_dir;
+ u8 rx_offset;
+
+ u32 rx_dma_off;
+
+ u32 ctrl;
+ u32 fl_bufsz;
+
+ struct bpf_prog *xdp_prog;
+
+ struct nfp_net_tx_ring *tx_rings;
+ struct nfp_net_rx_ring *rx_rings;
+
+ u8 __iomem *ctrl_bar;
+
+ /* Cold data follows */
+
+ unsigned int txd_cnt;
+ unsigned int rxd_cnt;
+
+ unsigned int num_r_vecs;
+
+ unsigned int num_tx_rings;
+ unsigned int num_stack_tx_rings;
+ unsigned int num_rx_rings;
+
+ unsigned int mtu;
+};
+
+/**
+ * struct nfp_net - NFP network device structure
+ * @dp: Datapath structure
+ * @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
* @max_mtu: Maximum supported MTU advertised by the Firmware
+ * @rss_hfunc: RSS selected hash function
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
@@ -454,17 +513,9 @@ struct nfp_stat_pair {
* @rx_filter_change: Jiffies when statistics last changed
* @rx_filter_stats_timer: Timer for polling filter offload statistics
* @rx_filter_lock: Lock protecting timer state changes (teardown)
+ * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
- * @num_tx_rings: Currently configured number of TX rings
- * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
- * @num_rx_rings: Currently configured number of RX rings
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
- * @tx_rings: Array of pre-allocated TX ring structures
- * @rx_rings: Array of pre-allocated RX ring structures
- * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
- * @num_r_vecs: Number of used ring vectors
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -480,7 +531,8 @@ struct nfp_stat_pair {
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
- * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading
+ * @link_changed: Has link state changed since last port refresh?
+ * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
* @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter
@@ -488,36 +540,24 @@ struct nfp_stat_pair {
* @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW
* @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts
* @qcp_cfg: Pointer to QCP queue used for configuration notification
- * @ctrl_bar: Pointer to mapped control BAR
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
* @port_list: Entry on device port list
+ * @pdev: Backpointer to PCI device
* @cpp: CPP device handle if available
+ * @eth_port: Translated ETH Table port entry
*/
struct nfp_net {
- struct pci_dev *pdev;
- struct net_device *netdev;
-
- unsigned is_vf:1;
- unsigned bpf_offload_skip_sw:1;
- unsigned bpf_offload_xdp:1;
-
- u32 ctrl;
- u32 fl_bufsz;
-
- u32 rx_offset;
-
- struct bpf_prog *xdp_prog;
-
- struct nfp_net_tx_ring *tx_rings;
- struct nfp_net_rx_ring *rx_rings;
+ struct nfp_net_dp dp;
struct nfp_net_fw_version fw_ver;
+
u32 cap;
u32 max_mtu;
+ u8 rss_hfunc;
u32 rss_cfg;
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
@@ -530,18 +570,10 @@ struct nfp_net {
unsigned int max_tx_rings;
unsigned int max_rx_rings;
- unsigned int num_tx_rings;
- unsigned int num_stack_tx_rings;
- unsigned int num_rx_rings;
-
int stride_tx;
int stride_rx;
- int txd_cnt;
- int rxd_cnt;
-
unsigned int max_r_vecs;
- unsigned int num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
@@ -557,6 +589,7 @@ struct nfp_net {
u32 me_freq_mhz;
bool link_up;
+ bool link_changed;
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
@@ -575,7 +608,6 @@ struct nfp_net {
u8 __iomem *qcp_cfg;
- u8 __iomem *ctrl_bar;
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
@@ -584,14 +616,10 @@ struct nfp_net {
struct list_head port_list;
+ struct pci_dev *pdev;
struct nfp_cpp *cpp;
-};
-struct nfp_net_ring_set {
- unsigned int n_rings;
- unsigned int mtu;
- unsigned int dcnt;
- void *rings;
+ struct nfp_eth_table_port *eth_port;
};
/* Functions to read/write from/to a BAR
@@ -599,42 +627,42 @@ struct nfp_net_ring_set {
*/
static inline u16 nn_readb(struct nfp_net *nn, int off)
{
- return readb(nn->ctrl_bar + off);
+ return readb(nn->dp.ctrl_bar + off);
}
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
- writeb(val, nn->ctrl_bar + off);
+ writeb(val, nn->dp.ctrl_bar + off);
}
static inline u16 nn_readw(struct nfp_net *nn, int off)
{
- return readw(nn->ctrl_bar + off);
+ return readw(nn->dp.ctrl_bar + off);
}
static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
- writew(val, nn->ctrl_bar + off);
+ writew(val, nn->dp.ctrl_bar + off);
}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
- return readl(nn->ctrl_bar + off);
+ return readl(nn->dp.ctrl_bar + off);
}
static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
- writel(val, nn->ctrl_bar + off);
+ writel(val, nn->dp.ctrl_bar + off);
}
static inline u64 nn_readq(struct nfp_net *nn, int off)
{
- return readq(nn->ctrl_bar + off);
+ return readq(nn->dp.ctrl_bar + off);
}
static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
- writeq(val, nn->ctrl_bar + off);
+ writeq(val, nn->dp.ctrl_bar + off);
}
/* Flush posted PCI writes by reading something without side effects */
@@ -776,6 +804,7 @@ void nfp_net_netdev_clean(struct net_device *netdev);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
@@ -787,9 +816,14 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
+ struct netlink_ext_ack *extack);
+
+bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
+int nfp_net_refresh_eth_port(struct nfp_net *nn);
+void nfp_net_refresh_port_table(struct nfp_net *nn);
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
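
The big picture of this header change: everything the packet fast path reads moves into struct nfp_net_dp, embedded first in struct nfp_net so the hot members share the leading cache lines, with configuration-only fields pushed past the "Cold data follows" marker. Reconfiguration drops the ad-hoc nfp_net_ring_set descriptors in favour of clone-modify-commit on the whole datapath. A sketch of the intended calling pattern, assuming (as the prototypes above suggest) that nfp_net_ring_reconfig() consumes and frees the clone; the outer function name is illustrative:

	static int set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
	{
		struct nfp_net_dp *dp;

		dp = nfp_net_clone_dp(nn);	/* copy of nn->dp */
		if (!dp)
			return -ENOMEM;

		/* Edit the private copy, not the live datapath. */
		dp->rxd_cnt = rxd_cnt;
		dp->txd_cnt = txd_cnt;

		/* Swap rings over to the new config; the clone is
		 * assumed to be freed on success and on failure.
		 */
		return nfp_net_ring_reconfig(nn, dp, NULL);
	}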
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index a41377e26c07..db20376260f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,6 +41,7 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
+#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
@@ -66,6 +67,7 @@
#include <net/pkt_cls.h>
#include <net/vxlan.h>
+#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
@@ -83,20 +85,33 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
-static dma_addr_t
-nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
- int direction)
+static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
- return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
- bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+ return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static void
-nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
- unsigned int bufsz, int direction)
+nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
- dma_unmap_single(&nn->pdev->dev, dma_addr,
- bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+ dma_sync_single_for_device(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
+}
+
+static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
+{
+ dma_unmap_single_attrs(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
+ len, dp->rx_dma_dir);
}
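
These helpers split mapping from synchronization: buffers are mapped once with DMA_ATTR_SKIP_CPU_SYNC, then synced to the device for the full freelist buffer when given to hardware, and synced back to the CPU for only the bytes the NIC actually wrote. On recycle-heavy paths (XDP_TX, buffer reuse) this avoids repeatedly syncing whole pages. The generic shape of the idiom, independent of this driver:

	dma_addr_t addr;

	/* Map once, with no implicit CPU cache sync. */
	addr = dma_map_single_attrs(dev, buf, size, DMA_FROM_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* Before handing the buffer to the device: */
	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);

	/* After the device wrote pkt_len bytes, claim back only those: */
	dma_sync_single_for_cpu(dev, addr, pkt_len, DMA_FROM_DEVICE);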
/* Firmware reconfig
@@ -327,19 +342,22 @@ void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n)
{
+ struct nfp_net_dp *dp = &nn->dp;
+
nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
- nn->num_r_vecs = nn->max_r_vecs;
+ dp->num_r_vecs = nn->max_r_vecs;
memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
- if (nn->num_rx_rings > nn->num_r_vecs ||
- nn->num_tx_rings > nn->num_r_vecs)
- nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
- nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+ if (dp->num_rx_rings > dp->num_r_vecs ||
+ dp->num_tx_rings > dp->num_r_vecs)
+ dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
+ dp->num_rx_rings, dp->num_tx_rings,
+ dp->num_r_vecs);
- nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
- nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
- nn->num_stack_tx_rings = nn->num_tx_rings;
+ dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
+ dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
+ dp->num_stack_tx_rings = dp->num_tx_rings;
}
/**
@@ -373,6 +391,19 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
return IRQ_HANDLED;
}
+bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&nn->link_status_lock, flags);
+ ret = nn->link_changed;
+ nn->link_changed = false;
+ spin_unlock_irqrestore(&nn->link_status_lock, flags);
+
+ return ret;
+}
+
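@link_changed pairs with this helper as a classic test-and-clear flag: the interrupt path sets it under @link_status_lock, and a single consumer reads and clears it atomically. How it gets consumed is an assumption here, but the declarations added to nfp_net.h suggest the ethtool/port-refresh path polls it roughly like:

	/* Sketch: only re-read the NSP ETH table when the link actually
	 * changed since the last query (caller is assumed). */
	if (nfp_net_link_changed_read_clear(nn))
		nfp_net_refresh_eth_port(nn);
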
/**
* nfp_net_read_link_status() - Reread link status from control BAR
* @nn: NFP Network structure
@@ -392,13 +423,14 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
goto out;
nn->link_up = link_up;
+ nn->link_changed = true;
if (nn->link_up) {
- netif_carrier_on(nn->netdev);
- netdev_info(nn->netdev, "NIC Link is Up\n");
+ netif_carrier_on(nn->dp.netdev);
+ netdev_info(nn->dp.netdev, "NIC Link is Up\n");
} else {
- netif_carrier_off(nn->netdev);
- netdev_info(nn->netdev, "NIC Link is Down\n");
+ netif_carrier_off(nn->dp.netdev);
+ netdev_info(nn->dp.netdev, "NIC Link is Down\n");
}
out:
spin_unlock_irqrestore(&nn->link_status_lock, flags);
@@ -446,15 +478,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
* @tx_ring: TX ring structure
* @r_vec: IRQ vector servicing this ring
* @idx: Ring index
+ * @is_xdp: Is this an XDP TX ring?
*/
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx)
+ struct nfp_net_r_vector *r_vec, unsigned int idx,
+ bool is_xdp)
{
struct nfp_net *nn = r_vec->nfp_net;
tx_ring->idx = idx;
tx_ring->r_vec = r_vec;
+ tx_ring->is_xdp = is_xdp;
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -476,10 +511,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->r_vec = r_vec;
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
- rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
-
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
- rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
}
/**
@@ -530,7 +562,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
entry = &nn->irq_entries[vector_idx];
- snprintf(name, name_sz, format, netdev_name(nn->netdev));
+ snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
err = request_irq(entry->vector, handler, 0, name, nn);
if (err) {
nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -617,7 +649,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
/**
* nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @nn: NFP Net device
* @r_vec: per-ring structure
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to HW TX descriptor
@@ -626,7 +657,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
* Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
* Return an error if the packet header is greater than the maximum supported LSO header size.
*/
-static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
@@ -657,7 +688,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
/**
* nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to TX descriptor
@@ -666,7 +697,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
* This function sets the TX checksum flags in the TX descriptor based
* on the configuration and the protocol of the packet to be transmitted.
*/
-static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_csum(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
@@ -674,7 +706,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct iphdr *iph;
u8 l4_hdr;
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
return;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -693,8 +725,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
} else if (ipv6h->version == 6) {
l4_hdr = ipv6h->nexthdr;
} else {
- nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
- iph->version);
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
return;
}
@@ -706,8 +737,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txd->flags |= PCIE_DESC_TX_UDP_CSUM;
break;
default:
- nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
return;
}
@@ -737,28 +767,31 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
- struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_desc *txd, txdg;
- struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ struct nfp_net_tx_buf *txbuf;
struct netdev_queue *nd_q;
+ struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
int f, nr_frags;
int wr_idx;
u16 qidx;
+ dp = &nn->dp;
qidx = skb_get_queue_mapping(skb);
- tx_ring = &nn->tx_rings[qidx];
+ tx_ring = &dp->tx_rings[qidx];
r_vec = tx_ring->r_vec;
- nd_q = netdev_get_tx_queue(nn->netdev, qidx);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
- nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
- qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_busy++;
u64_stats_update_end(&r_vec->tx_sync);
@@ -766,9 +799,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
}
/* Start with the head skbuf */
- dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
@@ -792,11 +825,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->mss = 0;
txd->l4_offset = 0;
- nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
+ nfp_net_tx_tso(r_vec, txbuf, txd, skb);
- nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
+ nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
- if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
@@ -810,9 +843,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[f];
fsize = skb_frag_size(frag);
- dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(dp->dev, dma_addr))
goto err_unmap;
wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
@@ -851,8 +884,7 @@ err_unmap:
--f;
while (f >= 0) {
frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
@@ -861,13 +893,14 @@ err_unmap:
if (wr_idx < 0)
wr_idx += tx_ring->cnt;
}
- dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
- nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+ nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
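
Note the two new nfp_net_tx_xmit_more_flush() calls on the busy and mapping-failure paths. With skb->xmit_more batching, earlier packets in a batch may have been queued without ringing the doorbell on the promise that a later packet would do it; any early return must therefore flush the accumulated write-pointer adds or those descriptors are never processed. A sketch of what such a flush helper has to do (exact body assumed, names from this driver):

	static void tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
	{
		if (!tx_ring->wr_ptr_add)
			return;
		/* Order descriptor writes before the doorbell. */
		wmb();
		nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
		tx_ring->wr_ptr_add = 0;
	}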
@@ -884,7 +917,7 @@ err_free:
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
u32 done_pkts = 0, done_bytes = 0;
@@ -894,6 +927,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
int fidx;
int idx;
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
@@ -918,8 +954,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
if (fidx == -1) {
/* unmap head */
- dma_unmap_single(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
done_pkts += tx_ring->txbufs[idx].pkt_cnt;
@@ -927,8 +962,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
@@ -948,7 +982,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
- nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
if (nfp_net_tx_ring_should_wake(tx_ring)) {
/* Make sure TX thread will see updated tx_ring->rd_p */
@@ -966,11 +1000,13 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
u32 done_pkts = 0, done_bytes = 0;
int idx, todo;
u32 qcp_rd_p;
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
@@ -982,23 +1018,12 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
else
todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ done_pkts = todo;
while (todo--) {
idx = tx_ring->rd_p & (tx_ring->cnt - 1);
tx_ring->rd_p++;
- if (!tx_ring->txbufs[idx].frag)
- continue;
-
- nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_BIDIRECTIONAL);
- __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
-
- done_pkts++;
done_bytes += tx_ring->txbufs[idx].real_len;
-
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].frag = NULL;
- tx_ring->txbufs[idx].fidx = -2;
}
tx_ring->qcp_rd_p = qcp_rd_p;
@@ -1015,52 +1040,43 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
* Assumes that the device is stopped
*/
static void
-nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag;
- struct pci_dev *pdev = nn->pdev;
struct netdev_queue *nd_q;
- while (tx_ring->rd_p != tx_ring->wr_p) {
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
struct nfp_net_tx_buf *tx_buf;
- int idx;
+ struct sk_buff *skb;
+ int idx, nr_frags;
idx = tx_ring->rd_p & (tx_ring->cnt - 1);
tx_buf = &tx_ring->txbufs[idx];
- if (tx_ring == r_vec->xdp_ring) {
- nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
- nn->fl_bufsz, DMA_BIDIRECTIONAL);
- __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
- } else {
- struct sk_buff *skb = tx_ring->txbufs[idx].skb;
- int nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (tx_buf->fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- }
+ skb = tx_ring->txbufs[idx].skb;
+ nr_frags = skb_shinfo(skb)->nr_frags;
- /* check for last gather fragment */
- if (tx_buf->fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
tx_buf->dma_addr = 0;
tx_buf->skb = NULL;
tx_buf->fidx = -2;
@@ -1075,10 +1091,10 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
- if (tx_ring == r_vec->xdp_ring)
+ if (tx_ring->is_xdp)
return;
- nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -1087,7 +1103,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
+ for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
continue;
nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1098,16 +1114,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
/* Receive processing
*/
static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
+nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
unsigned int fl_bufsz;
fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ fl_bufsz += dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
fl_bufsz += NFP_NET_MAX_PREPEND;
else
- fl_bufsz += nn->rx_offset;
- fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
+ fl_bufsz += dp->rx_offset;
+ fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1126,62 +1143,53 @@ nfp_net_free_frag(void *frag, bool xdp)
/**
* nfp_net_rx_alloc_one() - Allocate and map page frag for RX
- * @rx_ring: RX ring structure of the skb
+ * @dp: NFP Net data path struct
* @dma_addr: Pointer to storage for DMA address (output param)
- * @fl_bufsz: size of freelist buffers
- * @xdp: Whether XDP is enabled
*
* This function will allocate a new page frag and map it for DMA.
*
* Return: allocated page frag or NULL on failure.
*/
-static void *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
- unsigned int fl_bufsz, bool xdp)
+static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
- struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- int direction;
void *frag;
- if (!xdp)
- frag = netdev_alloc_frag(fl_bufsz);
+ if (!dp->xdp_prog)
+ frag = netdev_alloc_frag(dp->fl_bufsz);
else
frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
if (!frag) {
- nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
}
- direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
- *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- nfp_net_free_frag(frag, xdp);
- nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
return NULL;
}
return frag;
}
-static void *
-nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
+static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!nn->xdp_prog)
- frag = napi_alloc_frag(nn->fl_bufsz);
+ if (!dp->xdp_prog)
+ frag = napi_alloc_frag(dp->fl_bufsz);
else
frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
if (!frag) {
- nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
}
- *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- nfp_net_free_frag(frag, nn->xdp_prog);
- nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
return NULL;
}
@@ -1190,17 +1198,21 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
/**
* nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring structure
* @frag: page fragment buffer
* @dma_addr: DMA address of skb mapping
*/
-static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
+static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
void *frag, dma_addr_t dma_addr)
{
unsigned int wr_idx;
wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
/* Stash SKB and DMA address away */
rx_ring->rxbufs[wr_idx].frag = frag;
rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
@@ -1208,7 +1220,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
rx_ring->wr_ptr_add++;
@@ -1249,19 +1262,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to remove buffers from
- * @xdp: Whether XDP is enabled
*
* Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
* entries. After device is disabled nfp_net_rx_ring_reset() must be called
* to restore required ring geometry.
*/
static void
-nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
- bool xdp)
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
- int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1272,9 +1283,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
if (!rx_ring->rxbufs[i].frag)
continue;
- nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
- rx_ring->bufsz, direction);
- nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
+ nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
rx_ring->rxbufs[i].dma_addr = 0;
rx_ring->rxbufs[i].frag = NULL;
}
@@ -1282,13 +1292,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to allocate buffers for
- * @xdp: Whether XDP is enabled
*/
static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
- bool xdp)
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
struct nfp_net_rx_buf *rxbufs;
unsigned int i;
@@ -1296,11 +1305,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
rxbufs = rx_ring->rxbufs;
for (i = 0; i < rx_ring->cnt - 1; i++) {
- rxbufs[i].frag =
- nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
- rx_ring->bufsz, xdp);
+ rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
+ nfp_net_rx_ring_bufs_free(dp, rx_ring);
return -ENOMEM;
}
}
@@ -1310,14 +1317,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to fill
*/
-static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag,
+ nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
rx_ring->rxbufs[i].dma_addr);
}
@@ -1337,17 +1347,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
/**
* nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @rxd: Pointer to RX descriptor
* @skb: Pointer to SKB
*/
-static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_rx_csum(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
- if (!(nn->netdev->features & NETIF_F_RXCSUM))
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
return;
if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
@@ -1378,8 +1389,9 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
}
}
-static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- unsigned int type, __be32 *hash)
+static void
+nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
{
if (!(netdev->features & NETIF_F_RXHASH))
return;
@@ -1388,34 +1400,33 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
+ meta->hash_type = PKT_HASH_TYPE_L3;
break;
default:
- skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
+ meta->hash_type = PKT_HASH_TYPE_L4;
break;
}
+
+ meta->hash = get_unaligned_be32(hash);
}
static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, struct nfp_net_rx_desc *rxd)
{
- struct nfp_net_rx_hash *rx_hash;
+ struct nfp_net_rx_hash *rx_hash = data;
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
- nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
&rx_hash->hash);
}
static void *
-nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
- int meta_len)
+nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, int meta_len)
{
- u8 *data = skb->data - meta_len;
u32 meta_info;
meta_info = get_unaligned_be32(data);
@@ -1425,13 +1436,13 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
switch (meta_info & NFP_NET_META_FIELD_MASK) {
case NFP_NET_META_HASH:
meta_info >>= NFP_NET_META_FIELD_SIZE;
- nfp_net_set_hash(netdev, skb,
+ nfp_net_set_hash(netdev, meta,
meta_info & NFP_NET_META_FIELD_MASK,
(__be32 *)data);
data += 4;
break;
case NFP_NET_META_MARK:
- skb->mark = get_unaligned_be32(data);
+ meta->mark = get_unaligned_be32(data);
data += 4;
break;
default:
@@ -1445,8 +1456,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
}
static void
-nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb)
+nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
{
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++;
@@ -1458,53 +1470,47 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
if (skb && rxbuf && skb->head == rxbuf->frag)
page_ref_inc(virt_to_head_page(rxbuf->frag));
if (rxbuf)
- nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
if (skb)
dev_kfree_skb_any(skb);
}
static bool
-nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
unsigned int pkt_len)
{
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
- dma_addr_t new_dma_addr;
- void *new_frag;
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return false;
- }
-
- new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
return false;
}
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
+
+ nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
+
txbuf->frag = rxbuf->frag;
txbuf->dma_addr = rxbuf->dma_addr;
txbuf->fidx = -1;
txbuf->pkt_cnt = 1;
txbuf->real_len = pkt_len;
- dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
pkt_len, DMA_BIDIRECTIONAL);
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = PCIE_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -1516,14 +1522,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
+static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
+ unsigned int *off, unsigned int *len)
{
struct xdp_buff xdp;
+ void *orig_data;
+ int ret;
- xdp.data = data;
- xdp.data_end = data + len;
+ xdp.data_hard_start = hard_start;
+ xdp.data = data + *off;
+ xdp.data_end = data + *off + *len;
- return bpf_prog_run_xdp(prog, &xdp);
+ orig_data = xdp.data;
+ ret = bpf_prog_run_xdp(prog, &xdp);
+
+ *len -= xdp.data - orig_data;
+ *off += xdp.data - orig_data;
+
+ return ret;
}
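
This rework is what makes bpf_xdp_adjust_head() functional on the NFP: the program is handed a true data_hard_start, and the driver derives the new packet offset and length from how far the program moved xdp.data, rather than assuming the frame is untouched. A hedged sketch of a program exercising it (header path and program body are assumptions):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* selftests-style helper header */

	SEC("xdp")
	int xdp_pop4(struct xdp_md *ctx)
	{
		/* Positive delta moves xdp.data forward, popping four
		 * bytes off the front; the driver shrinks pkt_len and
		 * grows pkt_off by the same amount.
		 */
		if (bpf_xdp_adjust_head(ctx, 4))
			return XDP_DROP;
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";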
/**
@@ -1540,25 +1556,24 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
- int rx_dma_map_dir;
int idx;
rcu_read_lock();
- xdp_prog = READ_ONCE(nn->xdp_prog);
- rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
- unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
dma_addr_t new_dma_addr;
void *new_frag;
@@ -1573,6 +1588,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ memset(&meta, 0, sizeof(meta));
+
rx_ring->rd_p++;
pkts_polled++;
@@ -1593,11 +1610,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len;
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off = meta_len;
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
else
- pkt_off = nn->rx_offset;
- data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
@@ -1605,30 +1623,62 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (!dp->chained_metadata_format) {
+ nfp_net_set_hash_desc(dp->netdev, &meta,
+ rxbuf->frag + meta_off, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ meta_len);
+ if (unlikely(end != rxbuf->frag + pkt_off)) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- nn->bpf_offload_xdp)) {
+ dp->bpf_offload_xdp)) {
+ unsigned int dma_off;
+ void *hard_start;
int act;
- dma_sync_single_for_cpu(&nn->pdev->dev,
- rxbuf->dma_addr + pkt_off,
- pkt_len, DMA_BIDIRECTIONAL);
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
- pkt_len);
+ hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+
+ act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
+ &pkt_off, &pkt_len);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
- pkt_off, pkt_len)))
- trace_xdp_exception(nn->netdev, xdp_prog, act);
+ dma_off,
+ pkt_len)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
- trace_xdp_exception(nn->netdev, xdp_prog, act);
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
case XDP_DROP:
- nfp_net_rx_give_one(rx_ring, rxbuf->frag,
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
continue;
}
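
The RX path restructuring above hinges on struct nfp_meta_parsed: prepend metadata is parsed straight out of the DMA buffer before any skb exists, so oversized or malformed metadata can be dropped, and XDP can run, without ever allocating one. Only after build_skb() succeeds are the parsed fields applied. Condensed from the surrounding hunks:

	struct nfp_meta_parsed meta = {};

	/* Parse hash/mark from the raw frag -- skb-free. */
	nfp_net_set_hash_desc(dp->netdev, &meta,
			      rxbuf->frag + meta_off, rxd);

	/* ... XDP verdict handled here, still skb-free ... */

	/* Apply only once the skb is built: */
	skb->mark = meta.mark;
	skb_set_hash(skb, meta.hash, meta.hash_type);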
@@ -1636,41 +1686,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue;
}
- new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
- &new_dma_addr);
+ new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
continue;
}
- nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
- rx_dma_map_dir);
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
- skb_reserve(skb, data_off);
+ skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
- if (nn->fw_ver.major <= 3) {
- nfp_net_set_hash_desc(nn->netdev, skb, rxd);
- } else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
- if (unlikely(end != skb->data)) {
- nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
- nfp_net_rx_drop(r_vec, rx_ring, NULL, skb);
- continue;
- }
- }
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, nn->netdev);
+ skb->protocol = eth_type_trans(skb, dp->netdev);
- nfp_net_rx_csum(nn, r_vec, rxd, skb);
+ nfp_net_rx_csum(dp, r_vec, rxd, skb);
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1707,10 +1745,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
nfp_net_xdp_complete(r_vec->xdp_ring);
}
- if (pkts_polled < budget) {
- napi_complete_done(napi, pkts_polled);
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- }
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
return pkts_polled;
}
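
Checking napi_complete_done()'s return value is the new NAPI idiom this cycle: with busy polling (including the epoll integration added this merge window), the call can return false to signal that a busy-polling user still owns the context, in which case unmasking the device interrupt would race with the poller. In general form:

	if (work_done < budget)
		if (napi_complete_done(napi, work_done))
			/* NAPI truly finished -- safe to re-arm. */
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);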
@@ -1725,13 +1762,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kfree(tx_ring->txbufs);
if (tx_ring->txds)
- dma_free_coherent(&pdev->dev, tx_ring->size,
+ dma_free_coherent(dp->dev, tx_ring->size,
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
@@ -1743,24 +1779,21 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
+ * @dp: NFP Net data path struct
* @tx_ring: TX Ring structure to allocate
- * @cnt: Ring buffer count
- * @is_xdp: True if ring will be used for XDP
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = cnt;
+ tx_ring->cnt = dp->txd_cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
- tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
@@ -1770,15 +1803,10 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
if (!tx_ring->txbufs)
goto err_alloc;
- if (!is_xdp)
- netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
+ if (!tx_ring->is_xdp)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
- nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
- tx_ring->idx, tx_ring->qcidx,
- tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
- is_xdp ? "XDP" : "");
-
return 0;
err_alloc:
@@ -1786,62 +1814,92 @@ err_alloc:
return -ENOMEM;
}
-static struct nfp_net_tx_ring *
-nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
- unsigned int num_stack_tx_rings)
+static void
+nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_tx_ring *rings;
- unsigned int r;
+ unsigned int i;
- rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
- if (!rings)
- return NULL;
+ if (!tx_ring->is_xdp)
+ return;
- for (r = 0; r < s->n_rings; r++) {
- int bias = 0;
+ for (i = 0; i < tx_ring->cnt; i++) {
+ if (!tx_ring->txbufs[i].frag)
+ return;
- if (r >= num_stack_tx_rings)
- bias = num_stack_tx_rings;
+ nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
+ __free_page(virt_to_page(tx_ring->txbufs[i].frag));
+ }
+}
+
+static int
+nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
+ unsigned int i;
- nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);
+ if (!tx_ring->is_xdp)
+ return 0;
- if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
- goto err_free_prev;
+ for (i = 0; i < tx_ring->cnt; i++) {
+ txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
+ if (!txbufs[i].frag) {
+ nfp_net_tx_ring_bufs_free(dp, tx_ring);
+ return -ENOMEM;
+ }
}
- return s->rings = rings;
-
-err_free_prev:
- while (r--)
- nfp_net_tx_ring_free(&rings[r]);
- kfree(rings);
- return NULL;
+ return 0;
}
-static void
-nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
+static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- struct nfp_net_ring_set new = *s;
+ unsigned int r;
+
+ dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+ GFP_KERNEL);
+ if (!dp->tx_rings)
+ return -ENOMEM;
- s->dcnt = nn->txd_cnt;
- s->rings = nn->tx_rings;
- s->n_rings = nn->num_tx_rings;
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ int bias = 0;
+
+ if (r >= dp->num_stack_tx_rings)
+ bias = dp->num_stack_tx_rings;
- nn->txd_cnt = new.dcnt;
- nn->tx_rings = new.rings;
- nn->num_tx_rings = new.n_rings;
+ nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
+ r, bias);
+
+ if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+err_free_ring:
+ nfp_net_tx_ring_free(&dp->tx_rings[r]);
+ }
+ kfree(dp->tx_rings);
+ return -ENOMEM;
}
-static void
-nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
+static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
- struct nfp_net_tx_ring *rings = s->rings;
unsigned int r;
- for (r = 0; r < s->n_rings; r++)
- nfp_net_tx_ring_free(&rings[r]);
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+ nfp_net_tx_ring_free(&dp->tx_rings[r]);
+ }
- kfree(rings);
+ kfree(dp->tx_rings);
}
/**
@@ -1851,13 +1909,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
- dma_free_coherent(&pdev->dev, rx_ring->size,
+ dma_free_coherent(dp->dev, rx_ring->size,
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
@@ -1869,26 +1926,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to allocate
- * @fl_bufsz: Size of buffers to allocate
- * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
- u32 cnt)
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = cnt;
- rx_ring->bufsz = fl_bufsz;
-
+ rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
- rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
@@ -1898,10 +1948,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
if (!rx_ring->rxbufs)
goto err_alloc;
- nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
- rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
- rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
-
return 0;
err_alloc:
@@ -1909,82 +1955,59 @@ err_alloc:
return -ENOMEM;
}
-static struct nfp_net_rx_ring *
-nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
- bool xdp)
+static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
- struct nfp_net_rx_ring *rings;
unsigned int r;
- rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
- if (!rings)
- return NULL;
+ dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+ GFP_KERNEL);
+ if (!dp->rx_rings)
+ return -ENOMEM;
- for (r = 0; r < s->n_rings; r++) {
- nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
- if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
+ if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
goto err_free_prev;
- if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
+ if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
goto err_free_ring;
}
- return s->rings = rings;
+ return 0;
err_free_prev:
while (r--) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
- nfp_net_rx_ring_free(&rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
}
- kfree(rings);
- return NULL;
-}
-
-static void
-nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
-{
- struct nfp_net_ring_set new = *s;
-
- s->mtu = nn->netdev->mtu;
- s->dcnt = nn->rxd_cnt;
- s->rings = nn->rx_rings;
- s->n_rings = nn->num_rx_rings;
-
- nn->netdev->mtu = new.mtu;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
- nn->rxd_cnt = new.dcnt;
- nn->rx_rings = new.rings;
- nn->num_rx_rings = new.n_rings;
+ kfree(dp->rx_rings);
+ return -ENOMEM;
}
-static void
-nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
- bool xdp)
+static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
- struct nfp_net_rx_ring *rings = s->rings;
unsigned int r;
- for (r = 0; r < s->n_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
- nfp_net_rx_ring_free(&rings[r]);
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
}
- kfree(rings);
+ kfree(dp->rx_rings);
}
static void
-nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- int idx)
+nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, int idx)
{
- r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+ r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
r_vec->tx_ring =
- idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
+ idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
- r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
- &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
+ r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
+ &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
}
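
For concreteness (illustrative numbers): with XDP attached, 4 RX rings and 8 TX rings of which num_stack_tx_rings = 4, the TX array is laid out as stack rings 0-3 followed by XDP rings 4-7. Vector 0 is then assigned rx_rings[0], tx_rings[0] and xdp_ring tx_rings[4]; vector 3 gets rx_rings[3], tx_rings[3] and tx_rings[7]; any vector beyond that would get NULL for all three.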
static int
@@ -1994,11 +2017,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int err;
/* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
+ netif_napi_add(nn->dp.netdev, &r_vec->napi,
nfp_net_poll, NAPI_POLL_WEIGHT);
snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, idx);
+ "%s-rxtx-%d", nn->dp.netdev->name, idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
@@ -2045,7 +2068,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn)
{
int i;
- for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
+ for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
get_unaligned_le32(nn->rss_key + i));
}
@@ -2069,13 +2092,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
(factor * nn->rx_coalesce_usecs);
- for (i = 0; i < nn->num_rx_rings; i++)
+ for (i = 0; i < nn->dp.num_rx_rings; i++)
nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
/* copy TX interrupt coalesce parameters */
value = (nn->tx_coalesce_max_frames << 16) |
(factor * nn->tx_coalesce_usecs);
- for (i = 0; i < nn->num_tx_rings; i++)
+ for (i = 0; i < nn->dp.num_tx_rings; i++)
nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}
@@ -2090,9 +2113,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
- get_unaligned_be32(nn->netdev->dev_addr));
+ get_unaligned_be32(nn->dp.netdev->dev_addr));
nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
- get_unaligned_be16(nn->netdev->dev_addr + 4));
+ get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
}
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2116,7 +2139,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
unsigned int r;
int err;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
update = NFP_NET_CFG_UPDATE_GEN;
update |= NFP_NET_CFG_UPDATE_MSIX;
@@ -2133,14 +2156,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_reset(&nn->rx_rings[r]);
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
- for (r = 0; r < nn->num_r_vecs; r++)
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
}
static void
@@ -2162,13 +2185,17 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}
-static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn: NFP Net device to reconfigure
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- u32 new_ctrl, update = 0;
+ u32 bufsz, new_ctrl, update = 0;
unsigned int r;
int err;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
nfp_net_rss_write_key(nn);
@@ -2184,22 +2211,23 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
+ 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
+ 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
nfp_net_write_mac_addr(nn);
- nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
- nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
- nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
+
+ bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
+ nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
/* Enable device */
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
@@ -2211,37 +2239,26 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
+ if (err) {
+ nfp_net_clear_config_and_disable(nn);
+ return err;
+ }
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
/* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it.
*/
- if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- udp_tunnel_get_rx_info(nn->netdev);
+ udp_tunnel_get_rx_info(nn->dp.netdev);
}
- return err;
-}
-
-/**
- * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
- * @nn: NFP Net device to reconfigure
- */
-static int nfp_net_set_config_and_enable(struct nfp_net *nn)
-{
- int err;
-
- err = __nfp_net_set_config_and_enable(nn);
- if (err)
- nfp_net_clear_config_and_disable(nn);
-
- return err;
+ return 0;
}
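
The TXRS/RXRS enable masks above special-case 64 rings because shifting a 64-bit value by 64 is undefined behaviour in C, not merely zero. A small standalone sketch of the idiom:

    #include <assert.h>
    #include <stdint.h>

    /* Standalone sketch of the ring-enable mask idiom: the all-rings
     * case is special-cased instead of computing ((u64)1 << 64) - 1,
     * which would be undefined behaviour.
     */
    static uint64_t ring_mask(unsigned int n_rings)
    {
            return n_rings == 64 ? 0xffffffffffffffffULL
                                 : ((uint64_t)1 << n_rings) - 1;
    }

    int main(void)
    {
            assert(ring_mask(1) == 0x1);
            assert(ring_mask(8) == 0xff);
            assert(ring_mask(64) == ~0ULL);
            return 0;
    }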
/**
@@ -2252,12 +2269,12 @@ static void nfp_net_open_stack(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
napi_enable(&nn->r_vecs[r].napi);
enable_irq(nn->r_vecs[r].irq_vector);
}
- netif_tx_wake_all_queues(nn->netdev);
+ netif_tx_wake_all_queues(nn->dp.netdev);
enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
@@ -2266,22 +2283,8 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = nn->txd_cnt,
- };
int err, r;
- if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
- nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
- return -EBUSY;
- }
-
/* Step 1: Allocate resources for rings and the like
* - Request interrupts
* - Allocate RX and TX ring resources
@@ -2299,33 +2302,28 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err)
goto err_cleanup_vec_p;
}
- nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
- if (!nn->rx_rings) {
- err = -ENOMEM;
+ err = nfp_net_rx_rings_prepare(nn, &nn->dp);
+ if (err)
goto err_cleanup_vec;
- }
- nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
- nn->num_stack_tx_rings);
- if (!nn->tx_rings) {
- err = -ENOMEM;
+ err = nfp_net_tx_rings_prepare(nn, &nn->dp);
+ if (err)
goto err_free_rx_rings;
- }
for (r = 0; r < nn->max_r_vecs; r++)
- nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+ nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
+ err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
if (err)
goto err_free_rings;
- err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+ err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
if (err)
goto err_free_rings;
@@ -2351,11 +2349,11 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0;
err_free_rings:
- nfp_net_tx_ring_set_free(nn, &tx);
+ nfp_net_tx_rings_free(&nn->dp);
err_free_rx_rings:
- nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
+ nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
- r = nn->num_r_vecs;
+ r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
@@ -2374,15 +2372,15 @@ static void nfp_net_close_stack(struct nfp_net *nn)
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- netif_carrier_off(nn->netdev);
+ netif_carrier_off(nn->dp.netdev);
nn->link_up = false;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
disable_irq(nn->r_vecs[r].irq_vector);
napi_disable(&nn->r_vecs[r].napi);
}
- netif_tx_disable(nn->netdev);
+ netif_tx_disable(nn->dp.netdev);
}
/**
@@ -2393,17 +2391,19 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
- nfp_net_rx_ring_free(&nn->rx_rings[r]);
+ for (r = 0; r < nn->dp.num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
+ nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
}
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_free(&nn->tx_rings[r]);
- for (r = 0; r < nn->num_r_vecs; r++)
+ for (r = 0; r < nn->dp.num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
+ nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
+ }
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(nn->rx_rings);
- kfree(nn->tx_rings);
+ kfree(nn->dp.rx_rings);
+ kfree(nn->dp.tx_rings);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2417,11 +2417,6 @@ static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
- nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
- return 0;
- }
-
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
nfp_net_close_stack(nn);
@@ -2443,7 +2438,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
u32 new_ctrl;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
@@ -2454,13 +2449,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
}
- if (new_ctrl == nn->ctrl)
+ if (new_ctrl == nn->dp.ctrl)
return;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
}
static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -2469,181 +2464,174 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn)
for (i = 0; i < sizeof(nn->rss_itbl); i++)
nn->rss_itbl[i] =
- ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+ ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}
-static int
-nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
- unsigned int *stack_tx_rings,
- struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx,
- struct nfp_net_ring_set *tx)
+static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ struct nfp_net_dp new_dp = *dp;
+
+ *dp = nn->dp;
+ nn->dp = new_dp;
+
+ nn->dp.netdev->mtu = new_dp.mtu;
+
+ if (!netif_is_rxfh_configured(nn->dp.netdev))
+ nfp_net_rss_init_itbl(nn);
+}
+
+static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
unsigned int r;
int err;
- if (rx)
- nfp_net_rx_ring_set_swap(nn, rx);
- if (tx)
- nfp_net_tx_ring_set_swap(nn, tx);
-
- swap(*num_vecs, nn->num_r_vecs);
- swap(*stack_tx_rings, nn->num_stack_tx_rings);
- *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+ nfp_net_dp_swap(nn, dp);
for (r = 0; r < nn->max_r_vecs; r++)
- nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
-
- if (!netif_is_rxfh_configured(nn->netdev))
- nfp_net_rss_init_itbl(nn);
+ nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_rx_queues(nn->netdev,
- nn->num_rx_rings);
+ err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
if (err)
return err;
- if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
- err = netif_set_real_num_tx_queues(nn->netdev,
- nn->num_stack_tx_rings);
+ if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
+ err = netif_set_real_num_tx_queues(nn->dp.netdev,
+ nn->dp.num_stack_tx_rings);
if (err)
return err;
}
- return __nfp_net_set_config_and_enable(nn);
+ return nfp_net_set_config_and_enable(nn);
+}
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
+{
+ struct nfp_net_dp *new;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ *new = nn->dp;
+
+ /* Clear things which need to be recomputed */
+ new->fl_bufsz = 0;
+ new->tx_rings = NULL;
+ new->rx_rings = NULL;
+ new->num_r_vecs = 0;
+ new->num_stack_tx_rings = 0;
+
+ return new;
}
static int
-nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct netlink_ext_ack *extack)
{
/* XDP-enabled tests */
- if (!xdp_prog)
+ if (!dp->xdp_prog)
return 0;
- if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
- nn_warn(nn, "MTU too large w/ XDP enabled\n");
+ if (dp->fl_bufsz > PAGE_SIZE) {
+ NL_MOD_TRY_SET_ERR_MSG(extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
- if (tx && tx->n_rings > nn->max_tx_rings) {
- nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
+ if (dp->num_tx_rings > nn->max_tx_rings) {
+ NL_MOD_TRY_SET_ERR_MSG(extack, "Insufficient number of TX rings w/ XDP enabled");
return -EINVAL;
}
return 0;
}
-static void
-nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx,
- struct nfp_net_ring_set *tx,
- unsigned int stack_tx_rings, unsigned int num_vecs)
-{
- nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
- nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
- nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
- nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
- nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
- nn->num_stack_tx_rings = stack_tx_rings;
- nn->num_r_vecs = num_vecs;
- *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
-
- if (!netif_is_rxfh_configured(nn->netdev))
- nfp_net_rss_init_itbl(nn);
-}
-
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct netlink_ext_ack *extack)
{
- unsigned int stack_tx_rings, num_vecs, r;
- int err;
+ int r, err;
+
+ dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
- stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
- if (*xdp_prog)
- stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+ dp->num_stack_tx_rings = dp->num_tx_rings;
+ if (dp->xdp_prog)
+ dp->num_stack_tx_rings -= dp->num_rx_rings;
- num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
+ dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
- err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+ err = nfp_net_check_config(nn, dp, extack);
if (err)
- return err;
+ goto exit_free_dp;
- if (!netif_running(nn->netdev)) {
- nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
- stack_tx_rings, num_vecs);
- return 0;
+ if (!netif_running(dp->netdev)) {
+ nfp_net_dp_swap(nn, dp);
+ err = 0;
+ goto exit_free_dp;
}
/* Prepare new rings */
- for (r = nn->num_r_vecs; r < num_vecs; r++) {
+ for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err) {
- num_vecs = r;
- goto err_cleanup_vecs;
- }
- }
- if (rx) {
- if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
- err = -ENOMEM;
+ dp->num_r_vecs = r;
goto err_cleanup_vecs;
}
}
- if (tx) {
- if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
- err = -ENOMEM;
- goto err_free_rx;
- }
- }
+
+ err = nfp_net_rx_rings_prepare(nn, dp);
+ if (err)
+ goto err_cleanup_vecs;
+
+ err = nfp_net_tx_rings_prepare(nn, dp);
+ if (err)
+ goto err_free_rx;
/* Stop device, swap in new rings, try to start the firmware */
nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn);
- err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
- xdp_prog, rx, tx);
+ err = nfp_net_dp_swap_enable(nn, dp);
if (err) {
int err2;
nfp_net_clear_config_and_disable(nn);
/* Try with old configuration and old rings */
- err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
- xdp_prog, rx, tx);
+ err2 = nfp_net_dp_swap_enable(nn, dp);
if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
err, err2);
}
- for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- if (rx)
- nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
- if (tx)
- nfp_net_tx_ring_set_free(nn, tx);
+ nfp_net_rx_rings_free(dp);
+ nfp_net_tx_rings_free(dp);
nfp_net_open_stack(nn);
+exit_free_dp:
+ kfree(dp);
return err;
err_free_rx:
- if (rx)
- nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+ nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
- for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ kfree(dp);
return err;
}
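
In outline, the live path through nfp_net_ring_reconfig() is: (1) derive fl_bufsz, the stack TX ring count and the vector count for the candidate dp; (2) validate it; (3) build the new vectors and rings alongside the old ones while traffic still flows; (4) quiesce the stack and disable the device; (5) swap the dp in and try to enable with the new configuration; (6) if the firmware refuses, swap back and re-enable the old configuration; (7) free whichever ring set is now unused and reopen the stack. Only step 4 onwards disturbs traffic, which keeps the outage window short.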
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = new_mtu,
- .dcnt = nn->rxd_cnt,
- };
+ struct nfp_net_dp *dp;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->mtu = new_mtu;
+
+ return nfp_net_ring_reconfig(nn, dp, NULL);
}
static void nfp_net_stat64(struct net_device *netdev,
@@ -2652,7 +2640,7 @@ static void nfp_net_stat64(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
u64 data[3];
unsigned int start;
@@ -2694,12 +2682,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct nfp_net *nn = netdev_priv(netdev);
if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (proto != htons(ETH_P_ALL))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
- if (!nn->bpf_offload_xdp)
+ if (!nn->dp.bpf_offload_xdp)
return nfp_net_bpf_offload(nn, tc->cls_bpf);
else
return -EBUSY;
@@ -2718,7 +2706,7 @@ static int nfp_net_set_features(struct net_device *netdev,
/* Assume this is not called with features we have not advertised */
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
@@ -2762,7 +2750,7 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
- if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
nn_err(nn, "Cannot disable HW TC offload while in use\n");
return -EBUSY;
}
@@ -2770,16 +2758,16 @@ static int nfp_net_set_features(struct net_device *netdev,
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
- if (new_ctrl == nn->ctrl)
+ if (new_ctrl == nn->dp.ctrl)
return 0;
- nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
+ nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
return err;
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
return 0;
}
@@ -2830,6 +2818,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int
+nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ if (!nn->eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nn->eth_port->is_split)
+ err = snprintf(name, len, "p%d", nn->eth_port->label_port);
+ else
+ err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
+ nn->eth_port->label_subport);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
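nfp_net_get_phys_port_name() relies on snprintf() returning the length the formatted string would have needed, so err >= len means the name was truncated and -EINVAL is returned rather than a mangled port name. A userspace illustration of the same check:

    #include <stdio.h>

    /* snprintf() returns the length the full string would have needed,
     * so a return value >= the buffer size signals truncation -- the
     * same test used before trusting the generated port name.
     */
    int main(void)
    {
            char name[4];
            int n = snprintf(name, sizeof(name), "p%ds%d", 12, 3);

            if (n >= (int)sizeof(name))
                    printf("truncated: needed %d bytes, have %zu\n",
                           n + 1, sizeof(name));
            return 0;
    }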
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -2842,7 +2850,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
nn->vxlan_ports[idx] = port;
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
return;
BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
@@ -2921,8 +2929,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->bpf_offload_xdp)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->dp.bpf_offload_xdp)
return prog ? -EBUSY : 0;
cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
} else {
@@ -2935,48 +2943,44 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
/* Stop offload if replace not possible */
if (ret && cmd.command == TC_CLSBPF_REPLACE)
nfp_net_xdp_offload(nn, NULL);
- nn->bpf_offload_xdp = prog && !ret;
+ nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
}
-static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
{
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = nn->txd_cnt,
- };
+ struct bpf_prog *old_prog = nn->dp.xdp_prog;
+ struct bpf_prog *prog = xdp->prog;
+ struct nfp_net_dp *dp;
int err;
- if (prog && prog->xdp_adjust_head) {
- nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
- if (!prog && !nn->xdp_prog)
+ if (!prog && !nn->dp.xdp_prog)
return 0;
- if (prog && nn->xdp_prog) {
- prog = xchg(&nn->xdp_prog, prog);
+ if (prog && nn->dp.xdp_prog) {
+ prog = xchg(&nn->dp.xdp_prog, prog);
bpf_prog_put(prog);
- nfp_net_xdp_offload(nn, nn->xdp_prog);
+ nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
return 0;
}
- tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->xdp_prog = prog;
+ dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
+ dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+ err = nfp_net_ring_reconfig(nn, dp, xdp->extack);
if (err)
return err;
- /* @prog got swapped and is now the old one */
- if (prog)
- bpf_prog_put(prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
- nfp_net_xdp_offload(nn, nn->xdp_prog);
+ nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
return 0;
}
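
A note on the three dp tweaks above: the clone gains one extra TX ring per RX ring so XDP_TX gets dedicated queues, RX buffers switch to DMA_BIDIRECTIONAL so the device can also read frames transmitted back out of them, and rx_dma_off reserves XDP_PACKET_HEADROOM (256 bytes) ahead of the packet so bpf_xdp_adjust_head() has room to grow into -- for example, with rx_offset = 32 the buffers are posted 224 bytes further into the page, for 256 bytes of total headroom. This is also why the earlier rejection of programs using xdp_adjust_head, removed above, is no longer needed.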
@@ -2987,9 +2991,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
- return nfp_net_xdp_setup(nn, xdp->prog);
+ return nfp_net_xdp_setup(nn, xdp);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
+ xdp->prog_attached = !!nn->dp.xdp_prog;
return 0;
default:
return -EINVAL;
@@ -3008,6 +3012,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
@@ -3020,9 +3025,9 @@ static const struct net_device_ops nfp_net_netdev_ops = {
void nfp_net_info(struct nfp_net *nn)
{
nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
- nn->is_vf ? "VF " : "",
- nn->num_tx_rings, nn->max_tx_rings,
- nn->num_rx_rings, nn->max_rx_rings);
+ nn->dp.is_vf ? "VF " : "",
+ nn->dp.num_tx_rings, nn->max_tx_rings,
+ nn->dp.num_rx_rings, nn->max_rx_rings);
nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
@@ -3074,21 +3079,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
nn = netdev_priv(netdev);
- nn->netdev = netdev;
+ nn->dp.netdev = netdev;
+ nn->dp.dev = &pdev->dev;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
- nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
- nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+ nn->dp.num_tx_rings = min_t(unsigned int,
+ max_tx_rings, num_online_cpus());
+ nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
netif_get_num_default_rss_queues());
- nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
- nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
+ nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
+ nn->dp.num_r_vecs = min_t(unsigned int,
+ nn->dp.num_r_vecs, num_online_cpus());
- nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
- nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+ nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
+ nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->rx_filter_lock);
@@ -3108,7 +3116,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
*/
void nfp_net_netdev_free(struct nfp_net *nn)
{
- free_netdev(nn->netdev);
+ free_netdev(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_rss_key_sz() - Get current size of the RSS key
+ * @nn: NFP Net device instance
+ *
+ * Return: size of the RSS key for currently selected hash function.
+ */
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
+{
+ switch (nn->rss_hfunc) {
+ case ETH_RSS_HASH_TOP:
+ return NFP_NET_CFG_RSS_KEY_SZ;
+ case ETH_RSS_HASH_XOR:
+ return 0;
+ case ETH_RSS_HASH_CRC32:
+ return 4;
+ }
+
+ nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
+ return 0;
}
/**
@@ -3117,14 +3146,32 @@ void nfp_net_netdev_free(struct nfp_net *nn)
*/
static void nfp_net_rss_init(struct nfp_net *nn)
{
- netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
+ unsigned long func_bit, rss_cap_hfunc;
+ u32 reg;
+
+ /* Read the RSS function capability and select first supported func */
+ reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
+ rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
+ if (!rss_cap_hfunc)
+ rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
+ NFP_NET_CFG_RSS_TOEPLITZ);
+
+ func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
+ if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
+ dev_warn(nn->dp.dev,
+ "Bad RSS config, defaulting to Toeplitz hash\n");
+ func_bit = ETH_RSS_HASH_TOP_BIT;
+ }
+ nn->rss_hfunc = 1 << func_bit;
+
+ netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
nfp_net_rss_init_itbl(nn);
/* Enable IPv4/IPv6 TCP by default */
nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
NFP_NET_CFG_RSS_IPV6_TCP |
- NFP_NET_CFG_RSS_TOEPLITZ |
+ FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
NFP_NET_CFG_RSS_MASK;
}
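
FIELD_GET() and FIELD_PREP() from linux/bitfield.h, used above and in the ethtool changes below, derive the shift from the mask itself, so each register field is described in exactly one place. A rough userspace equivalent, assuming a compiler with __builtin_ctz(), to show the mechanics:

    #include <assert.h>
    #include <stdint.h>

    /* Userspace sketch of the linux/bitfield.h helpers: field_get()
     * extracts a field described by a shifted mask and right-justifies
     * it, field_prep() shifts a value into mask position.
     */
    #define RSS_CAP_HFUNC 0xff000000u

    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
            return (reg & mask) >> __builtin_ctz(mask);
    }

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
            return (val << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
            uint32_t reg = 0x01000000;  /* Toeplitz bit set in the cap field */

            assert(field_get(RSS_CAP_HFUNC, reg) == 0x01);
            assert(field_prep(RSS_CAP_HFUNC, 0x01) == reg);
            return 0;
    }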
@@ -3151,6 +3198,10 @@ int nfp_net_netdev_init(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int err;
+ nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3158,17 +3209,26 @@ int nfp_net_netdev_init(struct net_device *netdev)
nfp_net_write_mac_addr(nn);
/* Determine RX packet/metadata boundary offset */
- if (nn->fw_ver.major >= 2)
- nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
- else
- nn->rx_offset = NFP_NET_RX_OFFSET;
+ if (nn->fw_ver.major >= 2) {
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+ if (reg > NFP_NET_MAX_PREPEND) {
+ nn_err(nn, "Invalid rx offset: %d\n", reg);
+ return -EINVAL;
+ }
+ nn->dp.rx_offset = reg;
+ } else {
+ nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+ }
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
netdev->mtu = nn->max_mtu;
else
netdev->mtu = NFP_NET_DEFAULT_MTU;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);
+ nn->dp.mtu = netdev->mtu;
+ nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
/* Advertise/enable offloads based on capabilities
*
@@ -3179,31 +3239,31 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->hw_features = NETIF_F_HIGHDMA;
if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
netdev->hw_features |= NETIF_F_RXCSUM;
- nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
}
if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
netdev->hw_features |= NETIF_F_SG;
- nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
}
if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
}
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
netdev->hw_features |= NETIF_F_RXHASH;
nfp_net_rss_init(nn);
- nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
}
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO)
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL;
- nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
netdev->hw_enc_features = netdev->hw_features;
}
@@ -3212,11 +3272,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
}
netdev->features = netdev->hw_features;
@@ -3229,14 +3289,14 @@ int nfp_net_netdev_init(struct net_device *netdev)
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
- nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
- nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* Allow IRQ moderation, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
nfp_net_irqmod_init(nn);
- nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
/* Stash the re-configuration queue away. First odd queue in TX Bar */
@@ -3275,10 +3335,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- unregister_netdev(nn->netdev);
+ unregister_netdev(nn->dp.netdev);
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- if (nn->bpf_offload_xdp)
+ if (nn->dp.xdp_prog)
+ bpf_prog_put(nn->dp.xdp_prog);
+ if (nn->dp.bpf_offload_xdp)
nfp_net_xdp_offload(nn, NULL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 385ba355c965..d04ccc9f6116 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -177,6 +177,19 @@
#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
#define NFP_NET_CFG_STS 0x0034
#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
+/* Link rate */
+#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
+#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF
+#define NFP_NET_CFG_STS_LINK_RATE \
+ (NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT)
+#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0
+#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
+#define NFP_NET_CFG_STS_LINK_RATE_1G 2
+#define NFP_NET_CFG_STS_LINK_RATE_10G 3
+#define NFP_NET_CFG_STS_LINK_RATE_25G 4
+#define NFP_NET_CFG_STS_LINK_RATE_40G 5
+#define NFP_NET_CFG_STS_LINK_RATE_50G 6
+#define NFP_NET_CFG_STS_LINK_RATE_100G 7
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
@@ -192,6 +205,14 @@
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
/**
+ * RSS capabilities
+ * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * @NFP_NET_CFG_RSS_HFUNC)
+ */
+#define NFP_NET_CFG_RSS_CAP 0x0054
+#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
+
+/**
* VXLAN/UDP encap configuration
* @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
@@ -249,7 +270,11 @@
#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
+#define NFP_NET_CFG_RSS_HFUNC 0xff000000
#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
+#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */
+#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */
+#define NFP_NET_CFG_RSS_HFUNCS 3
#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
#define NFP_NET_CFG_RSS_KEY_SZ 0x28
#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 6e9372a18375..4077c59bf782 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,9 +40,9 @@ static struct dentry *nfp_dir;
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
- int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_rx_ring *rx_ring;
+ int fl_rd_p, fl_wr_p, rxd_cnt;
struct nfp_net_rx_desc *rxd;
struct nfp_net *nn;
void *frag;
@@ -54,19 +54,18 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
- if (!netif_running(nn->netdev))
+ if (!netif_running(nn->dp.netdev))
goto out;
rxd_cnt = rx_ring->cnt;
fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
- rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
- rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
- seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
- rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p,
- fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);
+ seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n",
+ rx_ring->idx, rx_ring->fl_qcidx,
+ rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
+ rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
for (i = 0; i < rxd_cnt; i++) {
rxd = &rx_ring->rxds[i];
@@ -89,10 +88,6 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
seq_puts(file, " FL_RD");
if (i == fl_wr_p % rxd_cnt)
seq_puts(file, " FL_WR");
- if (i == rx_rd_p % rxd_cnt)
- seq_puts(file, " RX_RD");
- if (i == rx_wr_p % rxd_cnt)
- seq_puts(file, " RX_WR");
seq_putc(file, '\n');
}
@@ -143,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
- if (!netif_running(nn->netdev))
+ if (!netif_running(nn->dp.netdev))
goto out;
txd_cnt = tx_ring->cnt;
@@ -151,8 +146,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
- tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
+ tx_ring->idx, tx_ring->qcidx,
+ tx_ring == r_vec->tx_ring ? "" : "xdp",
+ tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
+ tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
for (i = 0; i < txd_cnt; i++) {
txd = &tx_ring->txds[i];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2649f7523c81..abbb47e60cc3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,6 +40,7 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -48,6 +49,7 @@
#include <linux/ethtool.h>
#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
@@ -126,9 +128,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
-#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
+#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
-#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
+#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
@@ -172,6 +174,119 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
}
+/**
+ * nfp_net_get_link_ksettings() - Get link speed settings
+ * @netdev: network interface device structure
+ * @cmd: ethtool command
+ *
+ * Reports speed settings based on info in the BAR provided by the fw.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_net_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ static const u32 ls_to_ethtool[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
+ };
+ struct nfp_net *nn = netdev_priv(netdev);
+ u32 sts, ls;
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+
+ if (nn->eth_port)
+ cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (!netif_carrier_ok(netdev))
+ return 0;
+
+ /* Use link speed from ETH table if available, otherwise try the BAR */
+ if (nn->eth_port) {
+ int err;
+
+ if (nfp_net_link_changed_read_clear(nn)) {
+ err = nfp_net_refresh_eth_port(nn);
+ if (err)
+ return err;
+ }
+
+ cmd->base.port = nn->eth_port->port_type;
+ cmd->base.speed = nn->eth_port->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ return 0;
+ }
+
+ sts = nn_readl(nn, NFP_NET_CFG_STS);
+
+ ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
+ if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
+ return -EOPNOTSUPP;
+
+ if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
+ ls >= ARRAY_SIZE(ls_to_ethtool))
+ return 0;
+
+ cmd->base.speed = ls_to_ethtool[ls];
+ cmd->base.duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
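Worked decode, for illustration: a 10G port with link up has sts = 0x7 (the link bit at bit 0 set, rate value 0x3 in bits 4:1). FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, 0x7) extracts ls = 3, and ls_to_ethtool[3] is SPEED_10000. Indexing with the raw sts word (7 here) would instead misreport 100G, which is why only the extracted field is a valid table index.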
+static int
+nfp_net_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_nsp *nsp;
+ int err;
+
+ if (!nn->eth_port)
+ return -EOPNOTSUPP;
+
+ if (netif_running(netdev)) {
+ nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ return -EBUSY;
+ }
+
+ nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
+ NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
+ if (err)
+ goto err_bad_set;
+ if (cmd->base.speed != SPEED_UNKNOWN) {
+ u32 speed = cmd->base.speed / nn->eth_port->lanes;
+
+ err = __nfp_eth_set_speed(nsp, speed);
+ if (err)
+ goto err_bad_set;
+ }
+
+ err = nfp_eth_config_commit_end(nsp);
+ if (err > 0)
+ return 0; /* no change */
+
+ nfp_net_refresh_port_table(nn);
+
+ return err;
+
+err_bad_set:
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+}
+
static void nfp_net_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -179,30 +294,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
- ring->rx_pending = nn->rxd_cnt;
- ring->tx_pending = nn->txd_cnt;
+ ring->rx_pending = nn->dp.rxd_cnt;
+ ring->tx_pending = nn->dp.txd_cnt;
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
- struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = txd_cnt,
- };
+ struct nfp_net_dp *dp;
- if (nn->rxd_cnt != rxd_cnt)
- reconfig_rx = &rx;
- if (nn->txd_cnt != txd_cnt)
- reconfig_tx = &tx;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
- reconfig_rx, reconfig_tx);
+ dp->rxd_cnt = rxd_cnt;
+ dp->txd_cnt = txd_cnt;
+
+ return nfp_net_ring_reconfig(nn, dp, NULL);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -223,11 +330,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
return -EINVAL;
- if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+ if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
return 0;
nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
- nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+ nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
@@ -245,7 +352,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < nn->num_r_vecs; i++) {
+ for (i = 0; i < nn->dp.num_r_vecs; i++) {
sprintf(p, "rvec_%u_rx_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rvec_%u_tx_pkts", i);
@@ -267,13 +374,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
p += ETH_GSTRING_LEN;
strncpy(p, "tx_lso", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
- for (i = 0; i < nn->num_tx_rings; i++) {
+ for (i = 0; i < nn->dp.num_tx_rings; i++) {
sprintf(p, "txq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "txq_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < nn->num_rx_rings; i++) {
+ for (i = 0; i < nn->dp.num_rx_rings; i++) {
sprintf(p, "rxq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rxq_%u_bytes", i);
@@ -306,12 +413,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
break;
case NFP_NET_DEV_ET_STATS:
- io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
+ io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
data[i] = readq(io_p);
break;
}
}
- for (j = 0; j < nn->num_r_vecs; j++) {
+ for (j = 0; j < nn->dp.num_r_vecs; j++) {
unsigned int start;
do {
@@ -337,16 +444,16 @@ static void nfp_net_get_stats(struct net_device *netdev,
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
data[i++] = gathered_stats[j];
- for (j = 0; j < nn->num_tx_rings; j++) {
- io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
+ for (j = 0; j < nn->dp.num_tx_rings; j++) {
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
data[i++] = readq(io_p);
- io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
- for (j = 0; j < nn->num_rx_rings; j++) {
- io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
+ for (j = 0; j < nn->dp.num_rx_rings; j++) {
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
data[i++] = readq(io_p);
- io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
}
@@ -410,7 +517,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = nn->num_rx_rings;
+ cmd->data = nn->dp.num_rx_rings;
return 0;
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -454,13 +561,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return -EINVAL;
}
- new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
+ new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
if (new_rss_cfg == nn->rss_cfg)
return 0;
- writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
+ writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
if (err)
return err;
@@ -496,7 +603,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
- return NFP_NET_CFG_RSS_KEY_SZ;
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ return -EOPNOTSUPP;
+
+ return nfp_net_rss_key_sz(nn);
}
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -512,9 +624,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
indir[i] = nn->rss_itbl[i];
if (key)
- memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
+ if (hfunc) {
+ *hfunc = nn->rss_hfunc;
+ if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ }
return 0;
}
@@ -527,14 +642,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
int i;
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
- !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
+ !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
if (!key && !indir)
return 0;
if (key) {
- memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
+ memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
nfp_net_rss_write_key(nn);
}
if (indir) {
@@ -564,7 +679,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
- regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
+ regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}
static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -676,7 +791,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
ec->tx_coalesce_usecs_high ||
ec->tx_max_coalesced_frames_high ||
ec->rate_sample_interval)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Compute factor used to convert coalesce '_usecs' parameters to
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
@@ -736,16 +851,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int num_tx_rings;
- num_tx_rings = nn->num_tx_rings;
- if (nn->xdp_prog)
- num_tx_rings -= nn->num_rx_rings;
+ num_tx_rings = nn->dp.num_tx_rings;
+ if (nn->dp.xdp_prog)
+ num_tx_rings -= nn->dp.num_rx_rings;
channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
channel->max_combined = min(channel->max_rx, channel->max_tx);
channel->max_other = NFP_NET_NON_Q_VECTORS;
- channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
- channel->rx_count = nn->num_rx_rings - channel->combined_count;
+ channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
+ channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
channel->tx_count = num_tx_rings - channel->combined_count;
channel->other_count = NFP_NET_NON_Q_VECTORS;
}
@@ -753,29 +868,19 @@ static void nfp_net_get_channels(struct net_device *netdev,
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
unsigned int total_tx)
{
- struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
- struct nfp_net_ring_set rx = {
- .n_rings = total_rx,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = total_tx,
- .dcnt = nn->txd_cnt,
- };
+ struct nfp_net_dp *dp;
- if (nn->num_rx_rings != total_rx)
- reconfig_rx = &rx;
- if (nn->num_stack_tx_rings != total_tx ||
- (nn->xdp_prog && reconfig_rx))
- reconfig_tx = &tx;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
- if (nn->xdp_prog)
- tx.n_rings += total_rx;
+ dp->num_rx_rings = total_rx;
+ dp->num_tx_rings = total_tx;
+ /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
+ if (dp->xdp_prog)
+ dp->num_tx_rings += total_rx;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
- reconfig_rx, reconfig_tx);
+ return nfp_net_ring_reconfig(nn, dp, NULL);
}
static int nfp_net_set_channels(struct net_device *netdev,
@@ -823,6 +928,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
.set_channels = nfp_net_set_channels,
+ .get_link_ksettings = nfp_net_get_link_ksettings,
+ .set_link_ksettings = nfp_net_set_link_ksettings,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3afcdc11480c..8cb87cbe1120 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -47,11 +47,12 @@
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
+#include <linux/rtnetlink.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
-#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_net_ctrl.h"
@@ -129,61 +130,61 @@ err_area:
return (u8 __iomem *)ERR_PTR(err);
}
+/**
+ * nfp_net_get_mac_addr() - Get the MAC address.
+ * @nn: NFP Network structure
+ * @cpp: NFP CPP handle
+ * @id: NFP port id
+ *
+ * First try to get the MAC address from the NSP ETH table. If that
+ * fails, try HWInfo. As a last resort, generate a random address.
+ */
static void
-nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
- unsigned int id)
+nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
+ struct nfp_net_dp *dp = &nn->dp;
u8 mac_addr[ETH_ALEN];
const char *mac_str;
char name[32];
+ if (nn->eth_port) {
+ ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
+ return;
+ }
+
snprintf(name, sizeof(name), "eth%d.mac", id);
mac_str = nfp_hwinfo_lookup(cpp, name);
if (!mac_str) {
- dev_warn(&nn->pdev->dev,
- "Can't lookup MAC address. Generate\n");
- eth_hw_addr_random(nn->netdev);
+ dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
+ eth_hw_addr_random(dp->netdev);
return;
}
if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
- dev_warn(&nn->pdev->dev,
+ dev_warn(dp->dev,
"Can't parse MAC address (%s). Generate.\n", mac_str);
- eth_hw_addr_random(nn->netdev);
+ eth_hw_addr_random(dp->netdev);
return;
}
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ ether_addr_copy(dp->netdev->dev_addr, mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}
-/**
- * nfp_net_get_mac_addr() - Get the MAC address.
- * @nn: NFP Network structure
- * @pf: NFP PF device structure
- * @id: NFP port id
- *
- * First try to get the MAC address from NSP ETH table. If that
- * fails try HWInfo. As a last resort generate a random address.
- */
-static void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
+static struct nfp_eth_table_port *
+nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
int i;
- for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
- if (pf->eth_tbl->ports[i].eth_index == id) {
- const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
+ for (i = 0; eth_tbl && i < eth_tbl->count; i++)
+ if (eth_tbl->ports[i].eth_index == id)
+ return &eth_tbl->ports[i];
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
- return;
- }
-
- nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+ return NULL;
}
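
nfp_net_find_port() factors the ETH-table walk out of the MAC lookup so later hunks can reuse it during probe and refresh. A trivial usage sketch (hypothetical helper, not from the commit):

static const u8 *example_port_mac(struct nfp_pf *pf, unsigned int id)
{
	struct nfp_eth_table_port *port;

	port = nfp_net_find_port(pf->eth_tbl, id);	/* NULL if absent */

	return port ? port->mac_addr : NULL;
}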
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -282,6 +283,7 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
while (!list_empty(&pf->ports)) {
nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
list_del(&nn->port_list);
+ pf->num_netdevs--;
nfp_net_netdev_free(nn);
}
@@ -290,7 +292,8 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver)
+ int stride, struct nfp_net_fw_version *fw_ver,
+ struct nfp_eth_table_port *eth_port)
{
u32 n_tx_rings, n_rx_rings;
struct nfp_net *nn;
@@ -305,12 +308,13 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
nn->cpp = pf->cpp;
nn->fw_ver = *fw_ver;
- nn->ctrl_bar = ctrl_bar;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
nn->rx_bar = rx_bar;
- nn->is_vf = 0;
+ nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
+ nn->eth_port = eth_port;
return nn;
}
@@ -322,7 +326,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
int err;
/* Get MAC address */
- nfp_net_get_mac_addr(nn, pf, id);
+ nfp_net_get_mac_addr(nn, pf->cpp, id);
/* Get ME clock frequency from ctrl BAR
* XXX for now frequency is hardcoded until we figure out how
@@ -330,7 +334,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->netdev);
+ err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
return err;
@@ -347,6 +351,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
int stride, struct nfp_net_fw_version *fw_ver)
{
u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
+ struct nfp_eth_table_port *eth_port;
struct nfp_net *nn;
unsigned int i;
int err;
@@ -362,17 +367,27 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
- nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
- stride, fw_ver);
- if (IS_ERR(nn)) {
- err = PTR_ERR(nn);
- goto err_free_prev;
+ eth_port = nfp_net_find_port(pf->eth_tbl, i);
+ if (eth_port && eth_port->override_changed) {
+ nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
+ } else {
+ nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
+ rx_bar, stride,
+ fw_ver, eth_port);
+ if (IS_ERR(nn)) {
+ err = PTR_ERR(nn);
+ goto err_free_prev;
+ }
+ list_add_tail(&nn->port_list, &pf->ports);
+ pf->num_netdevs++;
}
- list_add_tail(&nn->port_list, &pf->ports);
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
}
+ if (list_empty(&pf->ports))
+ return -ENODEV;
+
return 0;
err_free_prev:
@@ -399,7 +414,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
/* Get MSI-X vectors */
wanted_irqs = 0;
list_for_each_entry(nn, &pf->ports, port_list)
- wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+ wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
if (!pf->irq_entries) {
@@ -408,7 +423,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
}
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
- NFP_NET_MIN_PORT_IRQS * pf->num_ports,
+ NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
wanted_irqs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
@@ -418,7 +433,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
/* Distribute IRQs to ports */
irqs_left = num_irqs;
- ports_left = pf->num_ports;
+ ports_left = pf->num_netdevs;
list_for_each_entry(nn, &pf->ports, port_list) {
unsigned int n;
@@ -444,7 +459,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
err_prev_deinit:
list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_irqs_disable(pf->pdev);
err_vec_free:
@@ -454,6 +469,108 @@ err_nn_free:
return err;
}
+static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
+{
+ nfp_net_debugfs_dir_clean(&pf->ddir);
+
+ nfp_net_irqs_disable(pf->pdev);
+ kfree(pf->irq_entries);
+
+ nfp_cpp_area_release_free(pf->rx_area);
+ nfp_cpp_area_release_free(pf->tx_area);
+ nfp_cpp_area_release_free(pf->ctrl_area);
+}
+
+static void nfp_net_refresh_netdevs(struct work_struct *work)
+{
+ struct nfp_pf *pf = container_of(work, struct nfp_pf,
+ port_refresh_work);
+ struct nfp_eth_table *eth_table;
+ struct nfp_net *nn, *next;
+
+ mutex_lock(&pf->port_lock);
+
+ /* Check for nfp_net_pci_remove() racing against us */
+ if (list_empty(&pf->ports))
+ goto out;
+
+ list_for_each_entry(nn, &pf->ports, port_list)
+ nfp_net_link_changed_read_clear(nn);
+
+ eth_table = nfp_eth_read_ports(pf->cpp);
+ if (!eth_table) {
+ nfp_err(pf->cpp, "Error refreshing port config!\n");
+ goto out;
+ }
+
+ rtnl_lock();
+ list_for_each_entry(nn, &pf->ports, port_list) {
+ if (!nn->eth_port)
+ continue;
+ nn->eth_port = nfp_net_find_port(eth_table,
+ nn->eth_port->eth_index);
+ }
+ rtnl_unlock();
+
+ kfree(pf->eth_tbl);
+ pf->eth_tbl = eth_table;
+
+ list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
+ if (!nn->eth_port) {
+ nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
+ continue;
+ }
+ if (!nn->eth_port->override_changed)
+ continue;
+
+ nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");
+
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_netdev_clean(nn->dp.netdev);
+
+ list_del(&nn->port_list);
+ pf->num_netdevs--;
+ nfp_net_netdev_free(nn);
+ }
+
+ if (list_empty(&pf->ports))
+ nfp_net_pci_remove_finish(pf);
+out:
+ mutex_unlock(&pf->port_lock);
+}
+
+void nfp_net_refresh_port_table(struct nfp_net *nn)
+{
+ struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
+
+ schedule_work(&pf->port_refresh_work);
+}
+
+int nfp_net_refresh_eth_port(struct nfp_net *nn)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_eth_table *eth_table;
+
+ eth_table = nfp_eth_read_ports(nn->cpp);
+ if (!eth_table) {
+ nn_err(nn, "Error refreshing port state table!\n");
+ return -EIO;
+ }
+
+ eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
+ if (!eth_port) {
+ nn_err(nn, "Error finding state of the port!\n");
+ kfree(eth_table);
+ return -EIO;
+ }
+
+ memcpy(nn->eth_port, eth_port, sizeof(*eth_port));
+
+ kfree(eth_table);
+
+ return 0;
+}
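
One design note on the function above, inferred from the code rather than stated by the commit:

/* nn->eth_port may be aliased by callers across this refresh, so the
 * matching entry is memcpy()'d over the existing struct in place and
 * the freshly read table is freed at once; a pointer swap would leave
 * those aliases dangling.
 */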
+
/*
* PCI device functions
*/
@@ -467,17 +584,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
int stride;
int err;
+ INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
+ mutex_init(&pf->port_lock);
+
/* Verify that the board has completed initialization */
if (!nfp_is_ready(pf->cpp)) {
nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
return -EINVAL;
}
+ mutex_lock(&pf->port_lock);
pf->num_ports = nfp_net_pf_get_num_ports(pf);
ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
- if (!ctrl_bar)
- return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+ if (!ctrl_bar) {
+ err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+ goto err_unlock;
+ }
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
@@ -551,6 +674,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_clean_ddir;
+ mutex_unlock(&pf->port_lock);
+
return 0;
err_clean_ddir:
@@ -560,6 +685,8 @@ err_unmap_tx:
nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
nfp_cpp_area_release_free(pf->ctrl_area);
+err_unlock:
+ mutex_unlock(&pf->port_lock);
return err;
}
@@ -567,20 +694,21 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn;
+ mutex_lock(&pf->port_lock);
+ if (list_empty(&pf->ports))
+ goto out;
+
list_for_each_entry(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_pf_free_netdevs(pf);
- nfp_net_debugfs_dir_clean(&pf->ddir);
-
- nfp_net_irqs_disable(pf->pdev);
- kfree(pf->irq_entries);
+ nfp_net_pci_remove_finish(pf);
+out:
+ mutex_unlock(&pf->port_lock);
- nfp_cpp_area_release_free(pf->rx_area);
- nfp_cpp_area_release_free(pf->tx_area);
- nfp_cpp_area_release_free(pf->ctrl_area);
+ cancel_work_sync(&pf->port_refresh_work);
}
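
nfp_net_pci_remove() and the refresh worker can now race over the same port list; both serialize on pf->port_lock, treat an empty pf->ports as "someone else finished", and whichever side empties the list last runs nfp_net_pci_remove_finish(). Condensed from the hunks above:

/* Teardown protocol (condensed, names as in this file):
 *
 *   refresh worker                        nfp_net_pci_remove()
 *   --------------                        --------------------
 *   mutex_lock(&pf->port_lock)            mutex_lock(&pf->port_lock)
 *   list_empty(&pf->ports)? -> out        list_empty(&pf->ports)? -> out
 *   free ports whose config changed       clean up + free all ports
 *   list now empty?                       nfp_net_pci_remove_finish(pf)
 *     -> nfp_net_pci_remove_finish(pf)    mutex_unlock(&pf->port_lock)
 *   mutex_unlock(&pf->port_lock)          cancel_work_sync(&refresh_work)
 */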
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
index 18a851eb3508..cc823df12c8a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
spin_lock_bh(&nn->rx_filter_lock);
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
mod_timer(&nn->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
@@ -119,12 +119,12 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
if (tc_no_actions(cls_bpf->exts))
return NN_ACT_DIRECT;
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
/* TC legacy mode */
if (!tc_single_action(cls_bpf->exts))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
tcf_exts_to_list(cls_bpf->exts, &actions);
list_for_each_entry(a, &actions, list) {
@@ -132,11 +132,11 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return NN_ACT_TC_DROP;
if (is_tcf_mirred_egress_redirect(a) &&
- tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
return NN_ACT_TC_REDIR;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int
@@ -152,7 +152,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
int ret;
if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = nfp_net_bpf_get_act(nn, cls_bpf);
if (ret < 0)
@@ -160,16 +160,15 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
act = ret;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (max_mtu < nn->netdev->mtu) {
+ if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
- *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
- GFP_KERNEL);
+ *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
return -ENOMEM;
@@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return 0;
out:
- dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
return ret;
}
@@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
u64 bpf_addr = dma_addr;
int err;
- nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+ nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
if (dense_mode)
bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
nn_err(nn, "FW command error while loading BPF: %d\n", err);
/* Enable passing packets through BPF function */
- nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
- nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
- dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+ dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
spin_lock_bh(&nn->rx_filter_lock);
- nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
spin_unlock_bh(&nn->rx_filter_lock);
- nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
del_timer_sync(&nn->rx_filter_stats_timer);
- nn->bpf_offload_skip_sw = 0;
+ nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
@@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
* frames which didn't have BPF applied in the hardware should
* be fine if software fallback is available, though.
*/
- if (nn->bpf_offload_skip_sw)
+ if (nn->dp.bpf_offload_skip_sw)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return 0;
case TC_CLSBPF_ADD:
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -290,6 +289,6 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return nfp_net_bpf_stats_update(nn, cls_bpf);
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
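
The -ENOTSUPP to -EOPNOTSUPP sweep in this file is user-visible: ENOTSUPP (524) is a kernel-internal NFS code with no userspace errno string, so it surfaces as "Unknown error 524", while EOPNOTSUPP prints as "Operation not supported". A small userspace C illustration of the difference:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 524 is ENOTSUPP in the kernel; userspace has no name for it. */
	printf("%s\n", strerror(524));        /* "Unknown error 524" */
	printf("%s\n", strerror(EOPNOTSUPP)); /* "Operation not supported" */
	return 0;
}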
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 39407f7cc586..86e61be6f35c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
if (!is_valid_ether_addr(mac_addr)) {
- eth_hw_addr_random(nn->netdev);
+ eth_hw_addr_random(nn->dp.netdev);
return;
}
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->ctrl_bar = ctrl_bar;
- nn->is_vf = 1;
+ nn->dp.ctrl_bar = ctrl_bar;
+ nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
NFP_NET_MIN_PORT_IRQS,
- NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+ NFP_NET_NON_Q_VECTORS +
+ nn->dp.num_r_vecs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->netdev);
+ err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
goto err_irqs_disable;
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
nfp_net_irqs_disable(pdev);
@@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
} else {
iounmap(vf->q_bar);
}
- iounmap(nn->ctrl_bar);
+ iounmap(nn->dp.ctrl_bar);
nfp_net_netdev_free(nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 42cb720b696d..4df2ce261b3f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -48,32 +48,26 @@
const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
-/* Implemented in nfp_nsp.c */
+/* Implemented in nfp_nsp.c, low level functions */
struct nfp_nsp;
-struct firmware;
-
-struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
-void nfp_nsp_close(struct nfp_nsp *state);
-u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
-u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
-int nfp_nsp_wait(struct nfp_nsp *state);
-int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
-int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+
+struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state);
+bool nfp_nsp_config_modified(struct nfp_nsp *state);
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified);
+void *nfp_nsp_config_entries(struct nfp_nsp *state);
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
+void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
+ unsigned int idx);
+void nfp_nsp_config_clear_state(struct nfp_nsp *state);
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size);
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
/* Implemented in nfp_resource.c */
-#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
-#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
-
-/* NFP Resource Table self-identifier */
-#define NFP_RESOURCE_TBL_NAME "nfp.res"
-#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
-
-/* All other keys are CRC32-POSIX of the 8-byte identification string */
+/* All keys are CRC32-POSIX of the 8-byte identification string */
/* ARM/PCI vNIC Interfaces 0..3 */
#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0"
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 15cc3e77cf6a..43dc68e01274 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -217,7 +217,7 @@ static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
#define TARGET_WIDTH_64 8
static int
-compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
u32 *bar_config, u64 *bar_base,
int tgt, int act, int tok, u64 offset, size_t size, int width)
{
@@ -410,35 +410,36 @@ find_matching_bar(struct nfp6000_pcie *nfp,
/* Return EAGAIN if no resource is available */
static int
-find_unused_bar_noblock(struct nfp6000_pcie *nfp,
+find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
int tgt, int act, int tok,
u64 offset, size_t size, int width)
{
- int n, invalid = 0;
+ int n, busy = 0;
for (n = 0; n < nfp->bars; n++) {
- struct nfp_bar *bar = &nfp->bar[n];
+ const struct nfp_bar *bar = &nfp->bar[n];
int err;
- if (bar->bitsize == 0) {
- invalid++;
- continue;
- }
-
- if (atomic_read(&bar->refcnt) != 0)
+ if (!bar->bitsize)
continue;
/* Just check to see if we can make it fit... */
err = compute_bar(nfp, bar, NULL, NULL,
tgt, act, tok, offset, size, width);
+ if (err)
+ continue;
- if (err < 0)
- invalid++;
- else
+ if (!atomic_read(&bar->refcnt))
return n;
+
+ busy++;
}
- return (n == invalid) ? -EINVAL : -EAGAIN;
+ if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
+ tgt, act, tok, offset, size, width))
+ return -EINVAL;
+
+ return -EAGAIN;
}
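
The rewrite above also tightens the failure semantics of the BAR search; summarized (derived from the code, not stated in the commit):

/* Outcome matrix for find_unused_bar_noblock():
 *
 *   a BAR fits and is free   -> return its index
 *   a BAR fits but is busy   -> busy++, keep scanning
 *   only busy BARs fit       -> -EAGAIN (caller may retry/sleep)
 *   no BAR can ever fit      -> WARN + -EINVAL (malformed request)
 */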
static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 40108e66c654..e2abba4c3a3f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -65,39 +65,49 @@ struct nfp_cpp_resource {
u64 end;
};
-struct nfp_cpp_mutex {
- struct list_head list;
- struct nfp_cpp *cpp;
- int target;
- u16 usage;
- u16 depth;
- unsigned long long address;
- u32 key;
-};
-
+/**
+ * struct nfp_cpp - main nfpcore device structure
+ * The following fields are read-only after probe() exits or netdevs are spawned.
+ * @dev: embedded device structure
+ * @op: low-level implementation ops
+ * @priv: private data of the low-level implementation
+ * @model: chip model
+ * @interface: chip interface id we are using to reach it
+ * @serial: chip serial number
+ * @imb_cat_table: CPP Mapping Table
+ *
+ * The following fields can be used only in probe() or with rtnl held:
+ * @hwinfo: HWInfo database fetched from the device
+ * @rtsym: firmware run time symbols
+ *
+ * The following fields use explicit locking:
+ * @resource_list: NFP CPP resource list
+ * @resource_lock: protects @resource_list
+ *
+ * @area_cache_list: cached areas for cpp/xpb read/write speed up
+ * @area_cache_mutex: protects @area_cache_list
+ *
+ * @waitq: area wait queue
+ */
struct nfp_cpp {
struct device dev;
- void *priv; /* Private data of the low-level implementation */
+ void *priv;
u32 model;
u16 interface;
u8 serial[NFP_SERIAL_LEN];
const struct nfp_cpp_operations *op;
- struct list_head resource_list; /* NFP CPP resource list */
- struct list_head mutex_cache; /* Mutex cache */
+ struct list_head resource_list;
rwlock_t resource_lock;
wait_queue_head_t waitq;
- /* NFP6000 CPP Mapping Table */
u32 imb_cat_table[16];
- /* Cached areas for cpp/xpb readl/writel speedups */
- struct mutex area_cache_mutex; /* Lock for the area cache */
+ struct mutex area_cache_mutex;
struct list_head area_cache_list;
- /* Cached information */
void *hwinfo;
void *rtsym;
};
@@ -187,24 +197,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp)
{
struct nfp_cpp_area_cache *cache, *ctmp;
struct nfp_cpp_resource *res, *rtmp;
- struct nfp_cpp_mutex *mutex, *mtmp;
-
- /* There should be no mutexes in the cache at this point. */
- WARN_ON(!list_empty(&cpp->mutex_cache));
- /* .. but if there are, unlock them and complain. */
- list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
- dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
- mutex->target, (unsigned long long)mutex->address,
- mutex->depth, mutex->usage);
-
- /* Forcing an unlock */
- mutex->depth = 1;
- nfp_cpp_mutex_unlock(mutex);
-
- /* Forcing a free */
- mutex->usage = 1;
- nfp_cpp_mutex_free(mutex);
- }
/* Remove all caches */
list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
@@ -419,9 +411,43 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
*/
void nfp_cpp_area_free(struct nfp_cpp_area *area)
{
+ if (atomic_read(&area->refcount))
+ nfp_warn(area->cpp, "Warning: freeing busy area\n");
nfp_cpp_area_put(area);
}
+static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
+{
+ *status = area->cpp->op->area_acquire(area);
+
+ return *status != -EAGAIN;
+}
+
+static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+ int err, status;
+
+ if (atomic_inc_return(&area->refcount) > 1)
+ return 0;
+
+ if (!area->cpp->op->area_acquire)
+ return 0;
+
+ err = wait_event_interruptible(area->cpp->waitq,
+ nfp_cpp_area_acquire_try(area, &status));
+ if (!err)
+ err = status;
+ if (err) {
+ nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
+ atomic_dec(&area->refcount);
+ return err;
+ }
+
+ nfp_cpp_area_get(area);
+
+ return 0;
+}
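
nfp_cpp_area_acquire_try() exists because wait_event_interruptible() owns the condition re-evaluation: the helper captures the op's result through *status so the final verdict (anything other than -EAGAIN) is preserved rather than discarded. The idiom, spelled out (assumed rationale):

/*
 *   err = wait_event_interruptible(wq, acquire_try(area, &status));
 *   if (!err)
 *           err = status;   // surface the op's real error, e.g. -EIO
 *
 * Without the capture, only the "woke up vs. interrupted" distinction
 * would survive and the acquire's failure code would be lost.
 */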
+
/**
* nfp_cpp_area_acquire() - lock down a CPP area for access
* @area: CPP area handle
@@ -433,27 +459,13 @@ void nfp_cpp_area_free(struct nfp_cpp_area *area)
*/
int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
- mutex_lock(&area->mutex);
- if (atomic_inc_return(&area->refcount) == 1) {
- int (*a_a)(struct nfp_cpp_area *);
-
- a_a = area->cpp->op->area_acquire;
- if (a_a) {
- int err;
+ int ret;
- wait_event_interruptible(area->cpp->waitq,
- (err = a_a(area)) != -EAGAIN);
- if (err < 0) {
- atomic_dec(&area->refcount);
- mutex_unlock(&area->mutex);
- return err;
- }
- }
- }
+ mutex_lock(&area->mutex);
+ ret = __nfp_cpp_area_acquire(area);
mutex_unlock(&area->mutex);
- nfp_cpp_area_get(area);
- return 0;
+ return ret;
}
/**
@@ -829,10 +841,7 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
* the need for special case code below when
* checking against available cache size.
*/
- if (length == 0)
- return NULL;
-
- if (list_empty(&cpp->area_cache_list) || id == 0)
+ if (length == 0 || id == 0)
return NULL;
/* Remap from cpp_island to cpp_target */
@@ -840,10 +849,15 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
if (err < 0)
return NULL;
- addr += *offset;
-
mutex_lock(&cpp->area_cache_mutex);
+ if (list_empty(&cpp->area_cache_list)) {
+ mutex_unlock(&cpp->area_cache_mutex);
+ return NULL;
+ }
+
+ addr += *offset;
+
/* See if we have a match */
list_for_each_entry(cache, &cpp->area_cache_list, entry) {
if (id == cache->id &&
@@ -937,12 +951,14 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
return -ENOMEM;
err = nfp_cpp_area_acquire(area);
- if (err)
- goto out;
+ if (err) {
+ nfp_cpp_area_free(area);
+ return err;
+ }
}
err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
-out:
+
if (cache)
area_cache_put(cpp, cache);
else
@@ -979,13 +995,14 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
return -ENOMEM;
err = nfp_cpp_area_acquire(area);
- if (err)
- goto out;
+ if (err) {
+ nfp_cpp_area_free(area);
+ return err;
+ }
}
err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
-out:
if (cache)
area_cache_put(cpp, cache);
else
@@ -1127,7 +1144,6 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
- INIT_LIST_HEAD(&cpp->mutex_cache);
INIT_LIST_HEAD(&cpp->resource_list);
INIT_LIST_HEAD(&cpp->area_cache_list);
mutex_init(&cpp->area_cache_mutex);
@@ -1425,322 +1441,3 @@ void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
{
return &cpp_explicit[1];
}
-
-/* THIS FUNCTION IS NOT EXPORTED */
-static u32 nfp_mutex_locked(u16 interface)
-{
- return (u32)interface << 16 | 0x000f;
-}
-
-static u32 nfp_mutex_unlocked(u16 interface)
-{
- return (u32)interface << 16 | 0x0000;
-}
-
-static bool nfp_mutex_is_locked(u32 val)
-{
- return (val & 0xffff) == 0x000f;
-}
-
-static bool nfp_mutex_is_unlocked(u32 val)
-{
- return (val & 0xffff) == 0000;
-}
-
-/* If you need more than 65536 recursive locks, please rethink your code. */
-#define MUTEX_DEPTH_MAX 0xffff
-
-static int
-nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
-{
- /* Not permitted on invalid interfaces */
- if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
- NFP_CPP_INTERFACE_TYPE_INVALID)
- return -EINVAL;
-
- /* Address must be 64-bit aligned */
- if (address & 7)
- return -EINVAL;
-
- if (*target != NFP_CPP_TARGET_MU)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * nfp_cpp_mutex_init() - Initialize a mutex location
- * @cpp: NFP CPP handle
- * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
- * @address: Offset into the address space of the NFP CPP target ID
- * @key: Unique 32-bit value for this mutex
- *
- * The CPP target:address must point to a 64-bit aligned location, and
- * will initialize 64 bits of data at the location.
- *
- * This creates the initial mutex state, as locked by this
- * nfp_cpp_interface().
- *
- * This function should only be called when setting up
- * the initial lock state upon boot-up of the system.
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
- int target, unsigned long long address, u32 key)
-{
- const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
- u16 interface = nfp_cpp_interface(cpp);
- int err;
-
- err = nfp_cpp_mutex_validate(interface, &target, address);
- if (err)
- return err;
-
- err = nfp_cpp_writel(cpp, muw, address + 4, key);
- if (err)
- return err;
-
- err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
- if (err)
- return err;
-
- return 0;
-}
-
-/**
- * nfp_cpp_mutex_alloc() - Create a mutex handle
- * @cpp: NFP CPP handle
- * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
- * @address: Offset into the address space of the NFP CPP target ID
- * @key: 32-bit unique key (must match the key at this location)
- *
- * The CPP target:address must point to a 64-bit aligned location, and
- * reserve 64 bits of data at the location for use by the handle.
- *
- * Only target/address pairs that point to entities that support the
- * MU Atomic Engine's CmpAndSwap32 command are supported.
- *
- * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
- */
-struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
- unsigned long long address, u32 key)
-{
- const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
- u16 interface = nfp_cpp_interface(cpp);
- struct nfp_cpp_mutex *mutex;
- int err;
- u32 tmp;
-
- err = nfp_cpp_mutex_validate(interface, &target, address);
- if (err)
- return NULL;
-
- /* Look for mutex on cache list */
- list_for_each_entry(mutex, &cpp->mutex_cache, list) {
- if (mutex->target == target && mutex->address == address) {
- mutex->usage++;
- return mutex;
- }
- }
-
- err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
- if (err < 0)
- return NULL;
-
- if (tmp != key)
- return NULL;
-
- mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
- if (!mutex)
- return NULL;
-
- mutex->cpp = cpp;
- mutex->target = target;
- mutex->address = address;
- mutex->key = key;
- mutex->depth = 0;
- mutex->usage = 1;
-
- /* Add mutex to cache list */
- list_add(&mutex->list, &cpp->mutex_cache);
-
- return mutex;
-}
-
-/**
- * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
- * @mutex: NFP CPP Mutex handle
- */
-void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
-{
- if (--mutex->usage)
- return;
-
- /* Remove mutex from cache */
- list_del(&mutex->list);
- kfree(mutex);
-}
-
-/**
- * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
- * @mutex: NFP CPP Mutex handle
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
-{
- unsigned long warn_at = jiffies + 15 * HZ;
- unsigned int timeout_ms = 1;
- int err;
-
- /* We can't use a waitqueue here, because the unlocker
- * might be on a separate CPU.
- *
- * So just wait for now.
- */
- for (;;) {
- err = nfp_cpp_mutex_trylock(mutex);
- if (err != -EBUSY)
- break;
-
- err = msleep_interruptible(timeout_ms);
- if (err != 0)
- return -ERESTARTSYS;
-
- if (time_is_before_eq_jiffies(warn_at)) {
- warn_at = jiffies + 60 * HZ;
- dev_warn(mutex->cpp->dev.parent,
- "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
- mutex->usage, mutex->depth,
- mutex->target, mutex->address, mutex->key);
- }
- }
-
- return err;
-}
-
-/**
- * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
- * @mutex: NFP CPP Mutex handle
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
-{
- const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
- const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
- struct nfp_cpp *cpp = mutex->cpp;
- u32 key, value;
- u16 interface;
- int err;
-
- interface = nfp_cpp_interface(cpp);
-
- if (mutex->depth > 1) {
- mutex->depth--;
- return 0;
- }
-
- err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
- if (err < 0)
- return err;
-
- if (key != mutex->key)
- return -EPERM;
-
- err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
- if (err < 0)
- return err;
-
- if (value != nfp_mutex_locked(interface))
- return -EACCES;
-
- err = nfp_cpp_writel(cpp, muw, mutex->address,
- nfp_mutex_unlocked(interface));
- if (err < 0)
- return err;
-
- mutex->depth = 0;
- return 0;
-}
-
-/**
- * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
- * @mutex: NFP CPP Mutex handle
- *
- * Return: 0 if the lock succeeded, -errno on failure
- */
-int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
-{
- const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
- const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
- const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
- struct nfp_cpp *cpp = mutex->cpp;
- u32 key, value, tmp;
- int err;
-
- if (mutex->depth > 0) {
- if (mutex->depth == MUTEX_DEPTH_MAX)
- return -E2BIG;
- mutex->depth++;
- return 0;
- }
-
- /* Verify that the lock marker is not damaged */
- err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
- if (err < 0)
- return err;
-
- if (key != mutex->key)
- return -EPERM;
-
- /* Compare against the unlocked state, and if true,
- * write the interface id into the top 16 bits, and
- * mark as locked.
- */
- value = nfp_mutex_locked(nfp_cpp_interface(cpp));
-
- /* We use test_set_imm here, as it implies a read
- * of the current state, and sets the bits in the
- * bytemask of the command to 1s. Since the mutex
- * is guaranteed to be 64-bit aligned, the bytemask
- * of this 32-bit command is ensured to be 8'b00001111,
- * which implies that the lower 4 bits will be set to
- * ones regardless of the initial state.
- *
- * Since this is a 'Readback' operation, with no Pull
- * data, we can treat this as a normal Push (read)
- * atomic, which returns the original value.
- */
- err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
- if (err < 0)
- return err;
-
- /* Was it unlocked? */
- if (nfp_mutex_is_unlocked(tmp)) {
- /* The read value can only be 0x....0000 in the unlocked state.
- * If there was another contending for this lock, then
- * the lock state would be 0x....000f
- */
-
- /* Write our owner ID into the lock
- * While not strictly necessary, this helps with
- * debug and bookkeeping.
- */
- err = nfp_cpp_writel(cpp, muw, mutex->address, value);
- if (err < 0)
- return err;
-
- mutex->depth = 1;
- return 0;
- }
-
- /* Already locked by us? Success! */
- if (tmp == value) {
- mutex->depth = 1;
- return 0;
- }
-
- return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
-}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
new file mode 100644
index 000000000000..8a99c189efa8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+struct nfp_cpp_mutex {
+ struct nfp_cpp *cpp;
+ int target;
+ u16 depth;
+ unsigned long long address;
+ u32 key;
+};
+
+static u32 nfp_mutex_locked(u16 interface)
+{
+ return (u32)interface << 16 | 0x000f;
+}
+
+static u32 nfp_mutex_unlocked(u16 interface)
+{
+ return (u32)interface << 16 | 0x0000;
+}
+
+static bool nfp_mutex_is_locked(u32 val)
+{
+ return (val & 0xffff) == 0x000f;
+}
+
+static bool nfp_mutex_is_unlocked(u32 val)
+{
+	return (val & 0xffff) == 0x0000;
+}
+
+/* If you need more than 65536 recursive locks, please rethink your code. */
+#define NFP_MUTEX_DEPTH_MAX 0xffff
+
+static int
+nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
+{
+ /* Not permitted on invalid interfaces */
+ if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
+ NFP_CPP_INTERFACE_TYPE_INVALID)
+ return -EINVAL;
+
+ /* Address must be 64-bit aligned */
+ if (address & 7)
+ return -EINVAL;
+
+ if (*target != NFP_CPP_TARGET_MU)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_init() - Initialize a mutex location
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ * @key: Unique 32-bit value for this mutex
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
+ int target, unsigned long long address, u32 key)
+{
+ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ u16 interface = nfp_cpp_interface(cpp);
+ int err;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address + 4, key);
+ if (err)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_alloc() - Create a mutex handle
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ * @key: 32-bit unique key (must match the key at this location)
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine's CmpAndSwap32 command are supported.
+ *
+ * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address, u32 key)
+{
+ const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ u16 interface = nfp_cpp_interface(cpp);
+ struct nfp_cpp_mutex *mutex;
+ int err;
+ u32 tmp;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return NULL;
+
+ err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+ if (err < 0)
+ return NULL;
+
+ if (tmp != key)
+ return NULL;
+
+ mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
+ if (!mutex)
+ return NULL;
+
+ mutex->cpp = cpp;
+ mutex->target = target;
+ mutex->address = address;
+ mutex->key = key;
+ mutex->depth = 0;
+
+ return mutex;
+}
+
+/**
+ * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
+ * @mutex: NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+ kfree(mutex);
+}
+
+/**
+ * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+ unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned int timeout_ms = 1;
+ int err;
+
+ /* We can't use a waitqueue here, because the unlocker
+ * might be on a separate CPU.
+ *
+ * So just wait for now.
+ */
+ for (;;) {
+ err = nfp_cpp_mutex_trylock(mutex);
+ if (err != -EBUSY)
+ break;
+
+ err = msleep_interruptible(timeout_ms);
+ if (err != 0)
+ return -ERESTARTSYS;
+
+ if (time_is_before_eq_jiffies(warn_at)) {
+ warn_at = jiffies + 60 * HZ;
+ nfp_warn(mutex->cpp,
+ "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
+ mutex->depth,
+ mutex->target, mutex->address, mutex->key);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+ const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ u32 key, value;
+ u16 interface;
+ int err;
+
+ interface = nfp_cpp_interface(cpp);
+
+ if (mutex->depth > 1) {
+ mutex->depth--;
+ return 0;
+ }
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return -EPERM;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ return err;
+
+ if (value != nfp_mutex_locked(interface))
+ return -EACCES;
+
+ err = nfp_cpp_writel(cpp, muw, mutex->address,
+ nfp_mutex_unlocked(interface));
+ if (err < 0)
+ return err;
+
+ mutex->depth = 0;
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 if the lock succeeded, -errno on failure
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+ const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
+ const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ u32 key, value, tmp;
+ int err;
+
+ if (mutex->depth > 0) {
+ if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
+ return -E2BIG;
+ mutex->depth++;
+ return 0;
+ }
+
+ /* Verify that the lock marker is not damaged */
+ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return -EPERM;
+
+ /* Compare against the unlocked state, and if true,
+ * write the interface id into the top 16 bits, and
+ * mark as locked.
+ */
+ value = nfp_mutex_locked(nfp_cpp_interface(cpp));
+
+ /* We use test_set_imm here, as it implies a read
+ * of the current state, and sets the bits in the
+ * bytemask of the command to 1s. Since the mutex
+ * is guaranteed to be 64-bit aligned, the bytemask
+ * of this 32-bit command is ensured to be 8'b00001111,
+ * which implies that the lower 4 bits will be set to
+ * ones regardless of the initial state.
+ *
+ * Since this is a 'Readback' operation, with no Pull
+ * data, we can treat this as a normal Push (read)
+ * atomic, which returns the original value.
+ */
+ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+ if (err < 0)
+ return err;
+
+ /* Was it unlocked? */
+ if (nfp_mutex_is_unlocked(tmp)) {
+ /* The read value can only be 0x....0000 in the unlocked state.
+ * If there was another contender for this lock, then
+ * the lock state would be 0x....000f
+ */
+
+ /* Write our owner ID into the lock
+ * While not strictly necessary, this helps with
+ * debug and bookkeeping.
+ */
+ err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+ if (err < 0)
+ return err;
+
+ mutex->depth = 1;
+ return 0;
+ }
+
+ return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
+}
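
With the per-cpp handle cache gone, alloc/free are plain kzalloc()/kfree() and each caller owns its handle outright. A minimal usage sketch under that assumption; the target address and key below are placeholders, not values from the commit:

static int example_with_nfp_lock(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *lock;
	int err;

	lock = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU,
				   0x8100000000ULL, 0xdeadbeef);
	if (!lock)
		return -EINVAL;

	err = nfp_cpp_mutex_lock(lock);	/* sleeps; -ERESTARTSYS on signal */
	if (err)
		goto exit_free;

	/* ... exclusive access to the shared device resource ... */

	err = nfp_cpp_mutex_unlock(lock);
exit_free:
	nfp_cpp_mutex_free(lock);	/* never alters the lock state */
	return err;
}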
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 34c50987c377..2fa9247bb23d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -49,6 +49,7 @@
#include "nfp.h"
#include "nfp_cpp.h"
+#include "nfp_nsp.h"
/* Offsets relative to the CSR base */
#define NSP_STATUS 0x00
@@ -77,7 +78,7 @@
#define NSP_MAGIC 0xab10
#define NSP_MAJOR 0
-#define NSP_MINOR (__MAX_SPCODE - 1)
+#define NSP_MINOR 8
#define NSP_CODE_MAJOR GENMASK(15, 12)
#define NSP_CODE_MINOR GENMASK(11, 0)
@@ -92,8 +93,18 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
+};
- __MAX_SPCODE,
+static const struct {
+ int code;
+ const char *msg;
+} nsp_errors[] = {
+ { 6010, "could not map to phy for port" },
+ { 6011, "not an allowed rate/lanes for port" },
+ { 6012, "not an allowed rate/lanes for port" },
+ { 6013, "high/low error, change other port first" },
+ { 6014, "config not found in flash" },
};
struct nfp_nsp {
@@ -103,8 +114,63 @@ struct nfp_nsp {
u16 major;
u16 minor;
} ver;
+
+ /* Eth table config state */
+ bool modified;
+ unsigned int idx;
+ void *entries;
};
+struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state)
+{
+ return state->cpp;
+}
+
+bool nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+ return state->modified;
+}
+
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified)
+{
+ state->modified = modified;
+}
+
+void *nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+ return state->entries;
+}
+
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+ return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+ state->entries = entries;
+ state->idx = idx;
+}
+
+void nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+ state->entries = NULL;
+ state->idx = 0;
+}
+
+static void nfp_nsp_print_extended_error(struct nfp_nsp *state, u32 ret_val)
+{
+ int i;
+
+ if (!ret_val)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(nsp_errors); i++)
+ if (ret_val == nsp_errors[i].code)
+ nfp_err(state->cpp, "err msg: %s\n", nsp_errors[i].msg);
+}
+
static int nfp_nsp_check(struct nfp_nsp *state)
{
struct nfp_cpp *cpp = state->cpp;
@@ -209,9 +275,8 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
if ((*reg & mask) == val)
return 0;
- err = msleep_interruptible(100);
- if (err)
- return err;
+ if (msleep_interruptible(25))
+ return -ERESTARTSYS;
if (time_after(start_time, wait_until))
return -ETIMEDOUT;
@@ -228,7 +293,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
*
* Return: 0 for success with no result
*
- * 1..255 for NSP completion with a result code
+ * positive value for NSP completion with a result code
*
* -EAGAIN if the NSP is not yet present
* -ENODEV if the NSP is not a supported model
@@ -239,7 +304,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
u32 buff_cpp, u64 buff_addr)
{
- u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command;
+ u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
u32 nsp_cpp;
int err;
@@ -292,18 +357,20 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return err;
}
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+ if (err < 0)
+ return err;
+ ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
err = FIELD_GET(NSP_STATUS_RESULT, reg);
if (err) {
- nfp_warn(cpp, "Result (error) code set: %d command: %d\n",
- -err, code);
+ nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
+ -err, (int)ret_val, code);
+ nfp_nsp_print_extended_error(state, ret_val);
return -err;
}
- err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg);
- if (err < 0)
- return err;
-
- return FIELD_GET(NSP_COMMAND_OPTION, reg);
+ return ret_val;
}
static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
@@ -380,9 +447,10 @@ int nfp_nsp_wait(struct nfp_nsp *state)
if (err != -EAGAIN)
break;
- err = msleep_interruptible(100);
- if (err)
+ if (msleep_interruptible(25)) {
+ err = -ERESTARTSYS;
break;
+ }
if (time_after(start_time, wait_until)) {
err = -ETIMEDOUT;
@@ -424,3 +492,9 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state,
return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
NULL, 0);
}
+
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
+ buf, size);
+}
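
The handle flow around these commands is unchanged; a minimal open/command/close sketch, assuming nfp_nsp_open() returns an ERR_PTR-encoded handle as elsewhere in nfpcore:

static int example_nsp_soft_reset(struct nfp_cpp *cpp)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(cpp);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	/* On failure this now logs the status plus any extended error
	 * code resolved through nsp_errors[].
	 */
	err = nfp_nsp_device_soft_reset(nsp);

	nfp_nsp_close(nsp);
	return err;
}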
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
new file mode 100644
index 000000000000..36b21e4dc56d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+struct firmware;
+struct nfp_cpp;
+struct nfp_nsp;
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+
+enum nfp_eth_interface {
+ NFP_INTERFACE_NONE = 0,
+ NFP_INTERFACE_SFP = 1,
+ NFP_INTERFACE_SFPP = 10,
+ NFP_INTERFACE_SFP28 = 28,
+ NFP_INTERFACE_QSFP = 40,
+ NFP_INTERFACE_CXP = 100,
+ NFP_INTERFACE_QSFP28 = 112,
+};
+
+enum nfp_eth_media {
+ NFP_MEDIA_DAC_PASSIVE = 0,
+ NFP_MEDIA_DAC_ACTIVE,
+ NFP_MEDIA_FIBRE,
+};
+
+enum nfp_eth_aneg {
+ NFP_ANEG_AUTO = 0,
+ NFP_ANEG_SEARCH,
+ NFP_ANEG_25G_CONSORTIUM,
+ NFP_ANEG_25G_IEEE,
+ NFP_ANEG_DISABLED,
+};
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count: number of table entries
+ * @ports: table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index: chip-wide first channel index
+ * @nbi: NBI index
+ * @base: first channel index (within NBI)
+ * @lanes: number of channels
+ * @speed: interface speed (in Mbps)
+ * @interface: interface (module) plugged in
+ * @media: media type of the @interface
+ * @aneg: auto negotiation mode
+ * @mac_addr: interface MAC address
+ * @label_port: port id
+ * @label_subport: id of interface within port (for split ports)
+ * @enabled: is enabled?
+ * @tx_enabled: is TX enabled?
+ * @rx_enabled: is RX enabled?
+ * @override_changed: is media reconfig pending?
+ *
+ * @port_type: one of %PORT_* defines for ethtool
+ * @is_split: is interface part of a split port
+ */
+struct nfp_eth_table {
+ unsigned int count;
+ struct nfp_eth_table_port {
+ unsigned int eth_index;
+ unsigned int index;
+ unsigned int nbi;
+ unsigned int base;
+ unsigned int lanes;
+ unsigned int speed;
+
+ unsigned int interface;
+ enum nfp_eth_media media;
+
+ enum nfp_eth_aneg aneg;
+
+ u8 mac_addr[ETH_ALEN];
+
+ u8 label_port;
+ u8 label_subport;
+
+ bool enabled;
+ bool tx_enabled;
+ bool rx_enabled;
+
+ bool override_changed;
+
+ /* Computed fields */
+ u8 port_type;
+
+ bool is_split;
+ } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
+
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
+ bool configed);
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+/**
+ * struct nfp_nsp_identify - NSP static information
+ * @version: opaque version string
+ * @flags: version flags
+ * @br_primary: branch id of primary bootloader
+ * @br_secondary: branch id of secondary bootloader
+ * @br_nsp: branch id of NSP
+ * @primary: version of primary bootloader
+ * @secondary: version id of secondary bootloader
+ * @nsp: version id of NSP
+ */
+struct nfp_nsp_identify {
+ char version[40];
+ u8 flags;
+ u8 br_primary;
+ u8 br_secondary;
+ u8 br_nsp;
+ u16 primary;
+ u16 secondary;
+ u16 nsp;
+};
+
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+
+#endif
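
The header implies a transactional flow for media reconfiguration: nfp_eth_config_start() opens the NSP and snapshots the table entry for one port, the __nfp_eth_set_*() setters stage changes, and commit_end()/cleanup_end() finish or abort. A hedged sketch of that flow, assuming nfp_eth_config_start() returns an ERR_PTR-encoded handle:

static int example_set_port_speed(struct nfp_cpp *cpp, unsigned int idx,
				  unsigned int speed_mbps)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);	/* snapshot entry @idx */
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_speed(nsp, speed_mbps);	/* stage only */
	if (err) {
		nfp_eth_config_cleanup_end(nsp);
		return err;
	}

	/* Writes the table back only if something was actually modified. */
	return nfp_eth_config_commit_end(nsp);
}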
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index edf703d319c8..e7a263de3731 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ * Copyright (C) 2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,51 +31,59 @@
* SOFTWARE.
*/
-#ifndef NSP_NSP_ETH_H
-#define NSP_NSP_ETH_H 1
+#include <linux/kernel.h>
+#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/if_ether.h>
+#include "nfp.h"
+#include "nfp_nsp.h"
-/**
- * struct nfp_eth_table - ETH table information
- * @count: number of table entries
- * @ports: table of ports
- *
- * @eth_index: port index according to legacy ethX numbering
- * @index: chip-wide first channel index
- * @nbi: NBI index
- * @base: first channel index (within NBI)
- * @lanes: number of channels
- * @speed: interface speed (in Mbps)
- * @mac_addr: interface MAC address
- * @label: interface id string
- * @enabled: is enabled?
- * @tx_enabled: is TX enabled?
- * @rx_enabled: is RX enabled?
- */
-struct nfp_eth_table {
- unsigned int count;
- struct nfp_eth_table_port {
- unsigned int eth_index;
- unsigned int index;
- unsigned int nbi;
- unsigned int base;
- unsigned int lanes;
- unsigned int speed;
+struct nsp_identify {
+ u8 version[40];
+ u8 flags;
+ u8 br_primary;
+ u8 br_secondary;
+ u8 br_nsp;
+ __le16 primary;
+ __le16 secondary;
+ __le16 nsp;
+ __le16 reserved;
+};
- u8 mac_addr[ETH_ALEN];
- char label[8];
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
+{
+ struct nfp_nsp_identify *nspi = NULL;
+ struct nsp_identify *ni;
+ int ret;
- bool enabled;
- bool tx_enabled;
- bool rx_enabled;
- } ports[0];
-};
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
+ return NULL;
+
+ ni = kzalloc(sizeof(*ni), GFP_KERNEL);
+ if (!ni)
+ return NULL;
+
+ ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
+ if (ret < 0) {
+ nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n",
+ ret);
+ goto exit_free;
+ }
+
+ nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
+ if (!nspi)
+ goto exit_free;
-struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
-struct nfp_eth_table *
-__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
-int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+ memcpy(nspi->version, ni->version, sizeof(nspi->version));
+ nspi->version[sizeof(nspi->version) - 1] = '\0';
+ nspi->flags = ni->flags;
+ nspi->br_primary = ni->br_primary;
+ nspi->br_secondary = ni->br_secondary;
+ nspi->br_nsp = ni->br_nsp;
+ nspi->primary = le16_to_cpu(ni->primary);
+ nspi->secondary = le16_to_cpu(ni->secondary);
+ nspi->nsp = le16_to_cpu(ni->nsp);
-#endif
+exit_free:
+ kfree(ni);
+ return nspi;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 1ece1f8ae4b3..639438d8313a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -43,13 +43,13 @@
#include <linux/module.h>
#include "nfp.h"
-#include "nfp_nsp_eth.h"
+#include "nfp_nsp.h"
#include "nfp6000/nfp6000.h"
#define NSP_ETH_NBI_PORT_COUNT 24
#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
- sizeof(struct eth_table_entry))
+ sizeof(union eth_table_entry))
#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
@@ -58,14 +58,32 @@
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
+#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+
+enum nfp_eth_raw {
+ NSP_ETH_RAW_PORT = 0,
+ NSP_ETH_RAW_STATE,
+ NSP_ETH_RAW_MAC,
+ NSP_ETH_RAW_CONTROL,
+
+ NSP_ETH_NUM_RAW
+};
enum nfp_eth_rate {
RATE_INVALID = 0,
@@ -76,29 +94,49 @@ enum nfp_eth_rate {
RATE_25G,
};
-struct eth_table_entry {
- __le64 port;
- __le64 state;
- u8 mac_addr[6];
- u8 resv[2];
- __le64 control;
+union eth_table_entry {
+ struct {
+ __le64 port;
+ __le64 state;
+ u8 mac_addr[6];
+ u8 resv[2];
+ __le64 control;
+ };
+ __le64 raw[NSP_ETH_NUM_RAW];
+};
+
+static const struct {
+ enum nfp_eth_rate rate;
+ unsigned int speed;
+} nsp_eth_rate_tbl[] = {
+ { RATE_INVALID, 0, },
+ { RATE_10M, SPEED_10, },
+ { RATE_100M, SPEED_100, },
+ { RATE_1G, SPEED_1000, },
+ { RATE_10G, SPEED_10000, },
+ { RATE_25G, SPEED_25000, },
};
-static unsigned int nfp_eth_rate(enum nfp_eth_rate rate)
+static unsigned int nfp_eth_rate2speed(enum nfp_eth_rate rate)
{
- unsigned int rate_xlate[] = {
- [RATE_INVALID] = 0,
- [RATE_10M] = SPEED_10,
- [RATE_100M] = SPEED_100,
- [RATE_1G] = SPEED_1000,
- [RATE_10G] = SPEED_10000,
- [RATE_25G] = SPEED_25000,
- };
+ int i;
- if (rate >= ARRAY_SIZE(rate_xlate))
- return 0;
+ for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].rate == rate)
+ return nsp_eth_rate_tbl[i].speed;
+
+ return 0;
+}
+
+static unsigned int nfp_eth_speed2rate(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].speed == speed)
+ return nsp_eth_rate_tbl[i].rate;
- return rate_xlate[rate];
+ return RATE_INVALID;
}
static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
@@ -110,8 +148,8 @@ static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
}
static void
-nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
- struct nfp_eth_table_port *dst)
+nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
+ unsigned int index, struct nfp_eth_table_port *dst)
{
unsigned int rate;
u64 port, state;
@@ -129,14 +167,60 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
- rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state));
+ rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state));
dst->speed = dst->lanes * rate;
+ dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state);
+ dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state);
+
nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
- snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
- FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
- FIELD_GET(NSP_ETH_PORT_LABEL, port));
+ dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+ dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17)
+ return;
+
+ dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
+ dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+}
+
+static void
+nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < table->count; i++)
+ for (j = 0; j < table->count; j++) {
+ if (i == j)
+ continue;
+ if (table->ports[i].label_port !=
+ table->ports[j].label_port)
+ continue;
+ if (table->ports[i].label_subport ==
+ table->ports[j].label_subport)
+ nfp_warn(cpp,
+ "Port %d subport %d is a duplicate\n",
+ table->ports[i].label_port,
+ table->ports[i].label_subport);
+
+ table->ports[i].is_split = true;
+ break;
+ }
+}
+
+static void
+nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry)
+{
+ if (entry->interface == NFP_INTERFACE_NONE) {
+ entry->port_type = PORT_NONE;
+ return;
+ }
+
+ if (entry->media == NFP_MEDIA_FIBRE)
+ entry->port_type = PORT_FIBRE;
+ else
+ entry->port_type = PORT_DA;
}
/**
@@ -166,10 +250,9 @@ struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp)
struct nfp_eth_table *
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
{
- struct eth_table_entry *entries;
+ union eth_table_entry *entries;
struct nfp_eth_table *table;
- unsigned int cnt;
- int i, j, ret;
+ int i, j, ret, cnt = 0;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
@@ -178,93 +261,288 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
- kfree(entries);
- return NULL;
+ goto err;
}
- /* Some versions of flash will give us 0 instead of port count */
- cnt = ret;
- if (!cnt) {
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
- if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
- cnt++;
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+
+ /* Some versions of flash will give us 0 instead of port count.
+ * For those that give a port count, verify it against the value
+ * calculated above.
+ */
+ if (ret && ret != cnt) {
+ nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",
+ ret, cnt);
+ goto err;
}
table = kzalloc(sizeof(*table) +
sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
- if (!table) {
- kfree(entries);
- return NULL;
- }
+ if (!table)
+ goto err;
table->count = cnt;
for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
- nfp_eth_port_translate(&entries[i], i,
+ nfp_eth_port_translate(nsp, &entries[i], i,
&table->ports[j++]);
+ nfp_eth_mark_split_ports(cpp, table);
+ for (i = 0; i < table->count; i++)
+ nfp_eth_calc_port_type(cpp, &table->ports[i]);
+
kfree(entries);
return table;
+
+err:
+ kfree(entries);
+ return NULL;
}
-/**
- * nfp_eth_set_mod_enable() - set PHY module enable control bit
- * @cpp: NFP CPP handle
- * @idx: NFP chip-wide port index
- * @enable: Desired state
- *
- * Enable or disable PHY module (this usually means setting the TX lanes
- * disable bits).
- *
- * Return: 0 or -ERRNO.
- */
-int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx)
{
- struct eth_table_entry *entries;
+ union eth_table_entry *entries;
struct nfp_nsp *nsp;
- u64 reg;
int ret;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
nsp = nfp_nsp_open(cpp);
if (IS_ERR(nsp)) {
kfree(entries);
- return PTR_ERR(nsp);
+ return nsp;
}
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
- goto exit_close_nsp;
+ goto err;
}
if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
nfp_warn(cpp, "trying to set port state on disabled port %d\n",
idx);
- ret = -EINVAL;
- goto exit_close_nsp;
+ goto err;
+ }
+
+ nfp_nsp_config_set_state(nsp, entries, idx);
+ return nsp;
+
+err:
+ nfp_nsp_close(nsp);
+ kfree(entries);
+ return ERR_PTR(-EIO);
+}
+
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+
+ nfp_nsp_config_set_modified(nsp, false);
+ nfp_nsp_config_clear_state(nsp);
+ nfp_nsp_close(nsp);
+ kfree(entries);
+}
+
+/**
+ * nfp_eth_config_commit_end() - perform recorded configuration changes
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ *
+ * Perform the configuration which was requested with __nfp_eth_set_*()
+ * helpers and recorded in @nsp state. If the device was already configured
+ * as requested, or no __nfp_eth_set_*() operations were made, no NSP command
+ * will be performed.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ int ret = 1;
+
+ if (nfp_nsp_config_modified(nsp)) {
+ ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ ret = ret < 0 ? ret : 0;
+ }
+
+ nfp_eth_config_cleanup_end(nsp);
+
+ return ret;
+}
+
+/**
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @enable: Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = le64_to_cpu(entries[idx].state);
+ if (enable != FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_ENABLED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
}
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/**
+ * nfp_eth_set_configured() - set PHY module configured control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @configed: Desired state
+ *
+ * Set the ifup/ifdown state on the PHY.
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ entries = nfp_nsp_config_entries(nsp);
+
/* Check if we are already in requested state */
reg = le64_to_cpu(entries[idx].state);
- if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
- ret = 0;
- goto exit_close_nsp;
+ if (configed != FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) {
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_CONFIGURED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
}
- reg = le64_to_cpu(entries[idx].control);
- reg &= ~NSP_ETH_CTRL_ENABLED;
- reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
- entries[idx].control = cpu_to_le64(reg);
+ return nfp_eth_config_commit_end(nsp);
+}
- ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
-exit_close_nsp:
- nfp_nsp_close(nsp);
- kfree(entries);
+/* Force inline, FIELD_* macros require masks to be compile-time known */
+static __always_inline int
+nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
+ const u64 mask, unsigned int val, const u64 ctrl_bit)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ unsigned int idx = nfp_nsp_config_idx(nsp);
+ u64 reg;
+
+ /* Note: set features were added in ABI 0.14 but the error
+ * codes were initially not populated correctly.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set operations not supported, please update flash\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are already in requested state */
+ reg = le64_to_cpu(entries[idx].raw[raw_idx]);
+ if (val == FIELD_GET(mask, reg))
+ return 0;
- return ret < 0 ? ret : 0;
+ reg &= ~mask;
+ reg |= FIELD_PREP(mask, val);
+ entries[idx].raw[raw_idx] = cpu_to_le64(reg);
+
+ entries[idx].control |= cpu_to_le64(ctrl_bit);
+
+ nfp_nsp_config_set_modified(nsp, true);
+
+ return 0;
+}
+
+/**
+ * __nfp_eth_set_aneg() - set PHY autonegotiation control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired autonegotiation mode
+ *
+ * Allow/disallow PHY module to advertise/perform autonegotiation.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
+{
+ return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_ANEG, mode,
+ NSP_ETH_CTRL_SET_ANEG);
+}
+
+/**
+ * __nfp_eth_set_speed() - set interface speed/rate
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @speed: Desired speed (per lane)
+ *
+ * Set lane speed. The provided @speed value should be the subport speed
+ * divided by the number of lanes this subport is spanning (e.g. 10000 for
+ * 40G, 25000 for 50G, etc.).
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
+{
+ enum nfp_eth_rate rate;
+
+ rate = nfp_eth_speed2rate(speed);
+ if (rate == RATE_INVALID) {
+ nfp_warn(nfp_nsp_cpp(nsp),
+ "could not find matching lane rate for speed %u\n",
+ speed);
+ return -EINVAL;
+ }
+
+ return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_RATE, rate,
+ NSP_ETH_CTRL_SET_RATE);
+}
+
+/**
+ * __nfp_eth_set_split() - set interface lane split
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @lanes: Desired lanes per port
+ *
+ * Set number of lanes in the port.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+{
+ return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ lanes, NSP_ETH_CTRL_SET_LANES);
}
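A minimal sketch of how the start/set/commit helpers above compose; the wrapper name is hypothetical. Per the kernel-doc above, nfp_eth_config_commit_end() returns 0 on success, 1 when nothing needed changing, and a negative errno on failure, while nfp_eth_config_cleanup_end() abandons the pending change:

    static int example_set_speed(struct nfp_cpp *cpp, unsigned int idx,
                                 unsigned int lane_speed)
    {
        struct nfp_nsp *nsp;
        int err;

        nsp = nfp_eth_config_start(cpp, idx);
        if (IS_ERR(nsp))
            return PTR_ERR(nsp);

        /* e.g. 40G over 4 lanes -> lane_speed == 10000 */
        err = __nfp_eth_set_speed(nsp, lane_speed);
        if (err) {
            nfp_eth_config_cleanup_end(nsp);
            return err;
        }

        return nfp_eth_config_commit_end(nsp);
    }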
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index a2850344f8b4..2d15a7c9d0de 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -45,6 +45,13 @@
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
+#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME "nfp.res"
+#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
+
#define NFP_RESOURCE_ENTRY_NAME_SZ 8
/**
@@ -100,9 +107,11 @@ static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
strncpy(name_pad, res->name, sizeof(name_pad));
/* Search for a matching entry */
- key = NFP_RESOURCE_TBL_KEY;
- if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
- key = crc32_posix(name_pad, sizeof(name_pad));
+ if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+ nfp_err(cpp, "Grabbing device lock not supported\n");
+ return -EOPNOTSUPP;
+ }
+ key = crc32_posix(name_pad, sizeof(name_pad));
for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
u64 addr = NFP_RESOURCE_TBL_BASE +
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 9709c8ca0774..159564d8dcdb 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -152,7 +152,6 @@ struct w90p910_ether {
struct tran_pdesc *tdesc;
dma_addr_t rdesc_phys;
dma_addr_t tdesc_phys;
- struct net_device_stats stats;
struct platform_device *pdev;
struct resource *res;
struct sk_buff *skb;
@@ -584,15 +583,6 @@ static int w90p910_ether_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
-{
- struct w90p910_ether *ether;
-
- ether = netdev_priv(dev);
-
- return &ether->stats;
-}
-
static int w90p910_send_frame(struct net_device *dev,
unsigned char *data, int length)
{
@@ -671,10 +661,10 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
ether->finish_tx = 0;
if (txbd->sl & TXDS_TXCP) {
- ether->stats.tx_packets++;
- ether->stats.tx_bytes += txbd->sl & 0xFFFF;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += txbd->sl & 0xFFFF;
} else {
- ether->stats.tx_errors++;
+ dev->stats.tx_errors++;
}
txbd->sl = 0x0;
@@ -730,7 +720,7 @@ static void netdev_rx(struct net_device *dev)
data = ether->rdesc->recv_buf[ether->cur_rx];
skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
- ether->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
@@ -738,24 +728,24 @@ static void netdev_rx(struct net_device *dev)
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
skb->protocol = eth_type_trans(skb, dev);
- ether->stats.rx_packets++;
- ether->stats.rx_bytes += length;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
netif_rx(skb);
} else {
- ether->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (status & RXDS_RP) {
dev_err(&pdev->dev, "rx runt err\n");
- ether->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
} else if (status & RXDS_CRCE) {
dev_err(&pdev->dev, "rx crc err\n");
- ether->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
} else if (status & RXDS_ALIE) {
dev_err(&pdev->dev, "rx alignment err\n");
- ether->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
} else if (status & RXDS_PTLE) {
dev_err(&pdev->dev, "rx longer err\n");
- ether->stats.rx_over_errors++;
+ dev->stats.rx_over_errors++;
}
}
@@ -912,7 +902,6 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_open = w90p910_ether_open,
.ndo_stop = w90p910_ether_close,
.ndo_start_xmit = w90p910_ether_start_xmit,
- .ndo_get_stats = w90p910_ether_stats,
.ndo_set_rx_mode = w90p910_ether_set_multicast_list,
.ndo_set_mac_address = w90p910_set_mac_address,
.ndo_do_ioctl = w90p910_ether_ioctl,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7b43a3b4abdc..3dd973475125 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1375,13 +1375,8 @@ netxen_receive_peg_ready(struct netxen_adapter *adapter)
} while (--retries);
- if (!retries) {
- printk(KERN_ERR "Receive Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
- }
-
- return 0;
+ pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
+ return -EIO;
}
int netxen_init_firmware(struct netxen_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa6545b..2ab1aab7c3fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,19 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 10
+#define QED_REVISION_VERSION 10
+#define QED_ENGINEERING_VERSION 21
+
+#define QED_VERSION \
+ ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+ (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
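With the component values above, QED_VERSION packs one byte per component with the major number in the most significant byte: (8 << 24) | (10 << 16) | (10 << 8) | 21 evaluates to 0x080a0a15. STORM_FW_VERSION packs the firmware components the same way.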
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -59,9 +71,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024)
+#define QED_MIN_WIDS (4)
#define QED_PF_DEMS_SIZE (4)
/* cau states */
@@ -76,6 +87,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
/* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value) \
+ do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+ } while (0)
+
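A brief illustration of the helper pair, reusing the DCBX_ETS_MAX_TCS field that this series reads later in qed_dcbx.c (the surrounding variables are illustrative only; both macros rely on the field defining in-place _MASK and _SHIFT companions):

    u32 flags = p_ets->flags;
    u8 max_tc;

    max_tc = QED_MFW_GET_FIELD(flags, DCBX_ETS_MAX_TCS);     /* extract */
    QED_MFW_SET_FIELD(flags, DCBX_ETS_MAX_TCS, max_tc + 1);  /* rewrite */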
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -130,9 +150,35 @@ enum qed_tunn_clss {
QED_TUNN_CLSS_MAC_VNI,
QED_TUNN_CLSS_INNER_MAC_VLAN,
QED_TUNN_CLSS_INNER_MAC_VNI,
+ QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_QED_TUNN_CLSS,
};
+struct qed_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum qed_tunn_clss tun_cls;
+};
+
+struct qed_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
+};
+
+struct qed_tunnel_info {
+ struct qed_tunn_update_type vxlan;
+ struct qed_tunn_update_type l2_geneve;
+ struct qed_tunn_update_type ip_geneve;
+ struct qed_tunn_update_type l2_gre;
+ struct qed_tunn_update_type ip_gre;
+
+ struct qed_tunn_update_udp_port vxlan_port;
+ struct qed_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
+};
+
struct qed_tunn_start_params {
unsigned long tunn_mode;
u16 vxlan_udp_port;
@@ -198,6 +244,7 @@ enum qed_resources {
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
+ QED_BDQ,
QED_MAX_RESC,
};
@@ -205,8 +252,9 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
- QED_VF_L2_QUE,
+ QED_ISCSI_CQ,
QED_FCOE_CQ,
+ QED_VF_L2_QUE,
QED_MAX_FEATURES,
};
@@ -219,7 +267,9 @@ enum QED_PORT_MODE {
QED_PORT_MODE_DE_4X20G,
QED_PORT_MODE_DE_1X40G,
QED_PORT_MODE_DE_2X25G,
- QED_PORT_MODE_DE_1X25G
+ QED_PORT_MODE_DE_1X25G,
+ QED_PORT_MODE_DE_4X25G,
+ QED_PORT_MODE_DE_2X10G,
};
enum qed_dev_cap {
@@ -249,9 +299,14 @@ struct qed_hw_info {
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
- u8 num_tc;
+ /* Number of traffic classes the HW supports */
+ u8 num_hw_tc;
+
+ /* Number of TCs which should be active according to DCBx or the
+ * upper layer driver configuration.
+ */
+ u8 num_active_tc;
u8 offload_tc;
- u8 non_offload_tc;
u32 concrete_fid;
u16 opaque_fid;
@@ -314,15 +369,19 @@ struct qed_qm_info {
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 low_latency_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
@@ -353,6 +412,12 @@ struct qed_fw_data {
u32 init_ops_size;
};
+#define DRV_MODULE_VERSION \
+ __stringify(QED_MAJOR_VERSION) "." \
+ __stringify(QED_MINOR_VERSION) "." \
+ __stringify(QED_REVISION_VERSION) "." \
+ __stringify(QED_ENGINEERING_VERSION)
+
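Since the numeric components are defined once and stringified here, DRV_MODULE_VERSION now expands to "8.10.10.21" and can no longer drift out of sync with the packed QED_VERSION word.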
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
@@ -364,7 +429,8 @@ struct qed_hwfn {
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
-#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+ (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
@@ -409,6 +475,11 @@ struct qed_hwfn {
struct qed_ptt *p_main_ptt;
struct qed_ptt *p_dpc_ptt;
+ /* PTP will be used only by the leading function.
+ * Usage of all PTP APIs should be synchronized as a result.
+ */
+ struct qed_ptt *p_ptp_ptt;
+
struct qed_sb_sp_info *p_sp_sb;
struct qed_sb_attn_info *p_sb_attn;
@@ -455,6 +526,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
/* PWM region specific data */
+ u16 wid_count;
u32 dpi_size;
u32 dpi_count;
@@ -465,8 +537,8 @@ struct qed_hwfn {
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
- /* p_ptp_ptt is valid for leading HWFN only */
- struct qed_ptt *p_ptp_ptt;
+ struct qed_ptt *p_arfs_ptt;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -523,9 +595,7 @@ struct qed_dev {
u8 dp_level;
char name[NAME_SIZE];
- u8 type;
-#define QED_DEV_TYPE_BB (0 << 0)
-#define QED_DEV_TYPE_AH BIT(0)
+ enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
@@ -540,6 +610,9 @@ struct qed_dev {
u16 vendor_id;
u16 device_id;
+#define QED_DEV_ID_MASK 0xff00
+#define QED_DEV_ID_MASK_BB 0x1600
+#define QED_DEV_ID_MASK_AH 0x8000
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -606,9 +679,7 @@ struct qed_dev {
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
-
- unsigned long tunn_mode;
-
+ struct qed_tunnel_info tunnel;
bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -652,12 +723,19 @@ struct qed_dev {
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
+ u16 tunn_feature_mask;
};
-#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -693,6 +771,26 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_device_num_engines(struct qed_dev *cdev);
+int qed_device_get_port_id(struct qed_dev *cdev);
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (BIT(0))
+#define PQ_FLAGS_MCOS (BIT(1))
+#define PQ_FLAGS_LB (BIT(2))
+#define PQ_FLAGS_OOO (BIT(3))
+#define PQ_FLAGS_ACK (BIT(4))
+#define PQ_FLAGS_OFLD (BIT(5))
+#define PQ_FLAGS_VFS (BIT(6))
+#define PQ_FLAGS_LLT (BIT(7))
+
+/* physical queue index for CM context initialization */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -721,5 +819,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7e3a6fed3da6..b3aaa985956e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -220,9 +219,6 @@ struct qed_cxt_mngr {
*/
u32 vf_count;
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -238,12 +234,17 @@ struct qed_cxt_mngr {
u32 t2_num_pages;
u64 first_free;
u64 last_free;
+
+ /* total number of SRQs for this hwfn */
+ u32 srq_count;
+
+ /* Maximum number of L2 steering filters */
+ u32 arfs_count;
};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
- type == PROTOCOLID_FCOE ||
- type == PROTOCOLID_ROCE;
+ type == PROTOCOLID_FCOE;
}
static bool tm_cid_proto(enum protocol_type type)
@@ -293,6 +294,9 @@ static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
+
+ /* Also account for the L2 steering (aRFS) filters */
+ iids->pf_cids += p_mngr->arfs_count;
}
/* counts the iids for the Timers block configuration */
@@ -304,16 +308,34 @@ struct qed_tm_iids {
u32 per_vf_tids;
};
-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_mngr *p_mngr,
struct qed_tm_iids *iids)
{
- u32 i, j;
-
- for (i = 0; i < MAX_CONN_TYPES; i++) {
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ int i, j;
+
+ /* The Timers block is a special case -> we don't count how many cids
+ * require timers but what's the max cid that will be used by the
+ * timer block. Therefore we traverse in reverse order, and once we
+ * hit a protocol that requires the timers memory, we sum all the
+ * protocols up to that one.
+ */
+ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
iids->per_vf_cids += p_cfg->cids_per_vf;
}
@@ -527,7 +549,22 @@ static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
return lines_to_skip;
}
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
+ *p_cli)
+{
+ p_cli->active = false;
+ p_cli->first.val = 0;
+ p_cli->last.val = 0;
+ return p_cli;
+}
+
+static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
+{
+ p_blk->total_size = 0;
+ return p_blk;
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 curr_line, total, i, task_size, line;
@@ -551,7 +588,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
- p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
+
curr_line = p_mngr->pf_start_line;
/* CDUC PF */
@@ -560,7 +598,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* get the counters for the CDUC and QM clients */
qed_cxt_cdu_iids(p_mngr, &cdu_iids);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
@@ -574,7 +612,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUC);
/* CDUC VF */
- p_blk = &p_cli->vf_blks[CDUC_BLK];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
@@ -588,7 +626,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUC);
/* CDUT PF */
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
p_cli->first.val = curr_line;
/* first the 'working' task memory */
@@ -597,7 +635,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
p_mngr->task_type_size[p_seg->type]);
@@ -612,7 +650,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ p_blk =
+ qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
if (!p_seg->has_fl_mem) {
/* The segment is active (total size of 'working'
@@ -657,7 +696,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* 'working' memory */
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
- p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total,
p_mngr->task_type_size[p_seg->type]);
@@ -666,7 +705,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUT);
/* 'init' memory */
- p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ p_blk =
+ qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
if (!p_seg->has_fl_mem) {
/* see comment above */
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -694,8 +734,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
}
/* QM */
- p_cli = &p_mngr->clients[ILT_CLI_QM];
- p_blk = &p_cli->pf_blks[0];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
@@ -719,7 +759,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
/* SRC */
- p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
qed_cxt_src_iids(p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
@@ -733,7 +773,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
total = roundup_pow_of_two(local_max);
- p_blk = &p_cli->pf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * sizeof(struct src_ent),
sizeof(struct src_ent));
@@ -744,11 +784,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
}
/* TM PF */
- p_cli = &p_mngr->clients[ILT_CLI_TM];
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
- p_blk = &p_cli->pf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
@@ -760,14 +800,14 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* TM VF */
total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
if (total) {
- p_blk = &p_cli->vf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
@@ -777,8 +817,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
total = qed_cxt_get_srq_count(p_hwfn);
if (total) {
- p_cli = &p_mngr->clients[ILT_CLI_TSDM];
- p_blk = &p_cli->pf_blks[SRQ_BLK];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
@@ -787,13 +827,50 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
}
+ *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
- RESC_NUM(p_hwfn, QED_ILT)) {
- DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
- curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ RESC_NUM(p_hwfn, QED_ILT))
return -EINVAL;
+
+ return 0;
+}
+
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 excess_lines, available_lines;
+ struct qed_cxt_mngr *p_mngr;
+ u32 ilt_page_size, elem_size;
+ struct qed_tid_seg *p_seg;
+ int i;
+
+ available_lines = RESC_NUM(p_hwfn, QED_ILT);
+ excess_lines = used_lines - available_lines;
+
+ if (!excess_lines)
+ return 0;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return 0;
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ elem_size = p_mngr->task_type_size[p_seg->type];
+ if (!elem_size)
+ continue;
+
+ return (ilt_page_size / elem_size) * excess_lines;
}
+ DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
return 0;
}
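A worked example of the conversion above, using this file's defaults: with ILT_DEFAULT_HW_P_SIZE of 4 an ILT page is 1 << (4 + 12) = 64 KiB, so for a hypothetical 128-byte CDUT task element each excess line corresponds to 65536 / 128 = 512 tasks; an overshoot of 10 lines would therefore tell the caller to retry with the RDMA task count reduced by 5120.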
@@ -1127,7 +1204,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
- /* default ILT page size for all clients is 32K */
+ /* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
@@ -1367,7 +1444,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
@@ -1393,22 +1470,15 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
- qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+ qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}
/* CM PF */
-static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
- union qed_qm_pq_params pq_params;
- u16 pq;
-
/* XCM pure-LB queue */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
- return 0;
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
/* DQ PF */
@@ -1640,7 +1710,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
u8 i;
memset(&tm_iids, 0, sizeof(tm_iids));
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
@@ -1758,9 +1828,9 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
qed_prs_init_common(p_hwfn);
}
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_qm_init_pf(p_hwfn);
+ qed_qm_init_pf(p_hwfn, p_ptt);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);
@@ -1884,13 +1954,12 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
}
static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+ struct qed_rdma_pf_params *p_params,
+ u32 num_tasks)
{
- u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+ u32 num_cons, num_qps, num_srqs;
enum protocol_type proto;
- num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
- num_tasks = num_mrs; /* each mr uses a single task id */
num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
switch (p_hwfn->hw_info.personality) {
@@ -1919,7 +1988,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
}
}
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
{
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
@@ -1931,9 +2000,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ETH_ROCE:
{
- qed_rdma_set_pf_params(p_hwfn,
- &p_hwfn->
- pf_params.rdma_pf_params);
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params,
+ rdma_tasks);
/* no need for break since RoCE coexists with Ethernet */
}
case QED_PCI_ETH:
@@ -1943,6 +2013,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons, 1);
+ p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
case QED_PCI_FCOE:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8b010324268a..53ad532dc212 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -105,19 +105,28 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
* @param p_hwfn
- *
+ * @param rdma_tasks - requested maximum number of RDMA tasks
* @return int
*/
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
* @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
+ * @param last_line
*
* @return int
*/
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute_excess - how many tasks must be dropped to fit within the available ILT lines
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
* @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -163,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
- *
- *
* @param p_hwfn
+ * @param p_ptt
*/
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
+ * @param p_ptt
*/
-
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cfdadb658ade..d883ad5bec6d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -183,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
"%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
qed_dcbx_app_update[i].name, p_data->arr[id].update,
p_data->arr[id].enable, p_data->arr[id].priority,
- p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
}
}
@@ -204,12 +204,8 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
p_data->arr[type].tc = tc;
/* QM reconf data */
- if (p_info->personality == personality) {
- if (personality == QED_PCI_ETH)
- p_info->non_offload_tc = tc;
- else
- p_info->offload_tc = tc;
- }
+ if (p_info->personality == personality)
+ p_info->offload_tc = tc;
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -275,8 +271,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, u8 dcbx_version)
{
- u8 tc, priority_map;
enum dcbx_protocol_type type;
+ u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
int priority;
@@ -376,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -558,8 +556,9 @@ qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc.willing, pfc_map);
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
}
static void
@@ -578,10 +577,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
- "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
- p_params->ets_willing,
- p_params->ets_cbs,
- p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
if (p_params->ets_enabled && !p_params->max_ets_tc) {
p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
@@ -622,8 +621,7 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
}
static void
-qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -635,8 +633,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
}
static void
-qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -649,7 +646,6 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct qed_dcbx_operational_params *p_operational;
@@ -670,6 +666,7 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n");
return;
}
@@ -683,8 +680,14 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
DCBX_CONFIG_VERSION_CEE);
p_operational->cee = val;
- DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
- p_operational->ieee, p_operational->cee);
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
@@ -699,7 +702,6 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
@@ -714,7 +716,6 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
@@ -728,25 +729,24 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
}
static int
-qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- struct qed_dcbx_get *p_params,
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params,
enum qed_mib_read_type type)
{
switch (type) {
case QED_DCBX_REMOTE_MIB:
- qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_remote_params(p_hwfn, p_params);
break;
case QED_DCBX_LOCAL_MIB:
- qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_local_params(p_hwfn, p_params);
break;
case QED_DCBX_OPERATIONAL_MIB:
- qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_operational_params(p_hwfn, p_params);
break;
case QED_DCBX_REMOTE_LLDP_MIB:
- qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case QED_DCBX_LOCAL_LLDP_MIB:
- qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
@@ -904,7 +904,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
qed_sp_pf_update(p_hwfn);
}
}
- qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
qed_dcbx_aen(p_hwfn, type);
return rc;
@@ -912,17 +913,14 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
{
- int rc = 0;
-
p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
if (!p_hwfn->p_dcbx_info)
- rc = -ENOMEM;
+ return -ENOMEM;
- return rc;
+ return 0;
}
-void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
- struct qed_dcbx_info *p_dcbx_info)
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_dcbx_info);
}
@@ -961,14 +959,9 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dcb_data = &p_dest->fcoe_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
p_dcb_data = &p_dest->roce_dcb_data;
-
- if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
- qed_dcbx_update_protocol_data(p_dcb_data, p_src,
- DCBX_PROTOCOL_ROCE);
- if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
- qed_dcbx_update_protocol_data(p_dcb_data, p_src,
- DCBX_PROTOCOL_ROCE_V2);
-
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE);
+ p_dcb_data = &p_dest->rroce_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2);
p_dcb_data = &p_dest->iscsi_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
p_dcb_data = &p_dest->eth_dcb_data;
@@ -994,7 +987,7 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
if (rc)
goto out;
- rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+ rc = qed_dcbx_get_params(p_hwfn, p_get, type);
out:
qed_ptt_release(p_hwfn, p_ptt);
@@ -1169,6 +1162,9 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
}
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
&params->config.params);
@@ -1245,6 +1241,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
if (dcbx_info->operational.ieee)
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
@@ -1794,8 +1792,9 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
- if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
- DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) &&
+ !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) {
+ DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n");
return 1;
}
@@ -1815,6 +1814,11 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
dcbx_set.enabled = true;
}
+ if (mode & DCB_CAP_DCBX_STATIC) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+ dcbx_set.enabled = true;
+ }
+
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return 1;
@@ -1823,7 +1827,7 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
qed_ptt_release(hwfn, ptt);
- return 0;
+ return rc;
}
static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
@@ -2202,15 +2206,46 @@ qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
+static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee)
+{
+ switch (selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case IEEE_8021QAZ_APP_SEL_STREAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_DGRAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_ANY:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
struct qed_app_entry *entry;
- bool ethtype;
u8 prio = 0;
+ u8 sf_ieee;
int i;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n",
+ app->selector, app->protocol);
+
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
return -EINVAL;
@@ -2221,11 +2256,9 @@ static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return -EINVAL;
}
- /* ieee defines the selector field value for ethertype to be 1 */
- ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
entry = &dcbx_info->operational.params.app_entry[i];
- if ((entry->ethtype == ethtype) &&
+ if ((entry->sf_ieee == sf_ieee) &&
(entry->proto_id == app->protocol)) {
prio = entry->prio;
break;
@@ -2253,14 +2286,22 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
struct qed_dcbx_set dcbx_set;
struct qed_app_entry *entry;
struct qed_ptt *ptt;
- bool ethtype;
+ u8 sf_ieee;
int rc, i;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n",
+ app->selector, app->protocol, app->priority);
if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
return -EINVAL;
}
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
return -EINVAL;
@@ -2278,11 +2319,9 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
if (rc)
return -EINVAL;
- /* ieee defines the selector field value for ethertype to be 1 */
- ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
entry = &dcbx_set.config.params.app_entry[i];
- if ((entry->ethtype == ethtype) &&
+ if ((entry->sf_ieee == sf_ieee) &&
(entry->proto_id == app->protocol))
break;
/* First empty slot */
@@ -2298,7 +2337,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
}
dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
- dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee;
dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 0fabe97f998d..414e26268f3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality;
};
-#define QED_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -122,7 +119,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *,
struct qed_ptt *, enum qed_mib_read_type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
-void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca57f96..483241b4b05d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@
/* Chip IDs enum */
enum chip_ids {
- CHIP_RESERVED,
CHIP_BB_B0,
CHIP_K2,
MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
MEM_GROUP_BTB_RAM,
MEM_GROUP_RDIF_CTX,
MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
"BTB_RAM",
"RDIF_CTX",
"TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
u32 size_in_dwords;
};
+struct chip_platform_defs {
+ u8 num_ports;
+ u8 num_pfs;
+ u8 num_vfs;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
- struct {
- u8 num_ports;
- u8 num_pfs;
- } per_platform[MAX_PLATFORM_IDS];
+ struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
/* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
{ "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
- { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } },
+ { "k2",
+ { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } }
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
4, TCM_REG_SM_TASK_CTX},
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
7, MCM_REG_SM_TASK_CTX},
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
3, UCM_REG_SM_TASK_CTX},
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
0, 0},
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
12, YCM_REG_SM_TASK_CTX},
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
/* Block definitions array */
static struct block_defs block_grc_defs = {
- "grc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "grc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
- "pglue_b", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ "pglue_b",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
};
static struct block_defs block_cnig_defs = {
- "cnig", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "cnig",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
- "ncsi", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "ncsi",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
- "bmb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ "bmb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
};
static struct block_defs block_pcie_defs = {
- "pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
- "mcp2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "mcp2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
};
static struct block_defs block_pswhst_defs = {
- "pswhst", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
};
static struct block_defs block_pswhst2_defs = {
- "pswhst2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
};
static struct block_defs block_pswrd_defs = {
- "pswrd", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
};
static struct block_defs block_pswrd2_defs = {
- "pswrd2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
};
static struct block_defs block_pswwr_defs = {
- "pswwr", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswwr",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
- "pswrq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
};
static struct block_defs block_pswrq2_defs = {
- "pswrq2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
};
static struct block_defs block_pglcs_defs = {
- "pglcs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pglcs",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
};
static struct block_defs block_ptu_defs = {
- "ptu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "ptu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
};
static struct block_defs block_dmae_defs = {
- "dmae", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "dmae",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
};
static struct block_defs block_tcm_defs = {
- "tcm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tcm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
};
static struct block_defs block_mcm_defs = {
- "mcm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "mcm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
};
static struct block_defs block_ucm_defs = {
- "ucm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "ucm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
};
static struct block_defs block_xcm_defs = {
- "xcm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xcm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
};
static struct block_defs block_ycm_defs = {
- "ycm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ycm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
};
static struct block_defs block_pcm_defs = {
- "pcm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "pcm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
};
static struct block_defs block_qm_defs = {
- "qm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ "qm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
};
static struct block_defs block_tm_defs = {
- "tm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
};
static struct block_defs block_dorq_defs = {
- "dorq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "dorq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
};
static struct block_defs block_brb_defs = {
- "brb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "brb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
};
static struct block_defs block_src_defs = {
- "src", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "src",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
};
static struct block_defs block_prs_defs = {
- "prs", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "prs",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
};
static struct block_defs block_tsdm_defs = {
- "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsdm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
};
static struct block_defs block_msdm_defs = {
- "msdm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msdm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
};
static struct block_defs block_usdm_defs = {
- "usdm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usdm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
};
static struct block_defs block_xsdm_defs = {
- "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsdm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
};
static struct block_defs block_ysdm_defs = {
- "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysdm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
};
static struct block_defs block_psdm_defs = {
- "psdm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psdm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
};
static struct block_defs block_tsem_defs = {
- "tsem", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsem",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
};
static struct block_defs block_msem_defs = {
- "msem", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msem",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
};
static struct block_defs block_usem_defs = {
- "usem", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usem",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
};
static struct block_defs block_xsem_defs = {
- "xsem", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsem",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
};
static struct block_defs block_ysem_defs = {
- "ysem", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysem",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
};
static struct block_defs block_psem_defs = {
- "psem", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psem",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
};
static struct block_defs block_rss_defs = {
- "rss", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "rss",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
};
static struct block_defs block_tmld_defs = {
- "tmld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "tmld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
};
static struct block_defs block_muld_defs = {
- "muld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "muld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
};
static struct block_defs block_yuld_defs = {
- "yuld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "yuld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
};
static struct block_defs block_xyld_defs = {
- "xyld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xyld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
};
static struct block_defs block_prm_defs = {
- "prm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "prm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
};
static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb1",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
};
static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
};
static struct block_defs block_rpb_defs = {
- "rpb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rpb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
};
static struct block_defs block_btb_defs = {
- "btb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ "btb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
};
static struct block_defs block_pbf_defs = {
- "pbf", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
};
static struct block_defs block_rdif_defs = {
- "rdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
};
static struct block_defs block_tdif_defs = {
- "tdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
};
static struct block_defs block_cdu_defs = {
- "cdu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "cdu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
};
static struct block_defs block_ccfc_defs = {
- "ccfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "ccfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
};
static struct block_defs block_tcfc_defs = {
- "tcfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "tcfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
};
static struct block_defs block_igu_defs = {
- "igu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "igu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
};
static struct block_defs block_cau_defs = {
- "cau", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "cau",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
};
static struct block_defs block_umac_defs = {
- "umac", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "umac",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
- "nig", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "nig",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
};
static struct block_defs block_wol_defs = {
- "wol", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "wol",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
};
static struct block_defs block_bmbn_defs = {
- "bmbn", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ "bmbn",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
- "nwm", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "nwm",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
};
static struct block_defs block_nws_defs = {
- "nws", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "nws",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+ NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+ NWS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
- "ms", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "ms",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+ MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+ MS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
- "phy_pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "phy_pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
};
static struct block_defs block_led_defs = {
- "led", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+ "avs_wrap", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+ "ptld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_ms_defs,
&block_phy_pcie_defs,
&block_led_defs,
+ &block_avs_wrap_defs,
+ &block_rgfs_defs,
+ &block_tgfs_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
};
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
- {256, 256, 320},
- {32, 32, 32} },
+ {256, 320},
+ {32, 32} },
{ "rss_mem_key_msb", "rss_key", 1024,
- {128, 128, 208},
- {256, 256, 256} },
+ {128, 208},
+ {256, 256} },
{ "rss_mem_key_lsb", "rss_key", 2048,
- {128, 128, 208},
- {64, 64, 64} },
+ {128, 208},
+ {64, 64} },
{ "rss_mem_info", "rss_info", 3072,
- {128, 128, 208},
- {16, 16, 16} },
+ {128, 208},
+ {16, 16} },
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 128), (128 * 208)},
- {16, 16, 16} }
+ {(128 * 128), (128 * 208)},
+ {16, 16} }
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 4800, 5632} },
+ {4800, 5632} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 2880, 3680} },
+ {2880, 3680} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152, 1152} }
+ {1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
};
static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
return dword;
}
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
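+	/* Assign default GRC param values only on the first call */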
+ if (!dev_data->grc.params_initialized) {
+ qed_dbg_grc_set_params_default(p_hwfn);
+ dev_data->grc.params_initialized = 1;
+ }
+}
+
/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
@@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_BB_B0;
- dev_data->mode_enable[MODE_BB_B0] = 1;
+ dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
}
dev_data->platform_id = PLATFORM_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
+
+ /* Initializes the GRC parameters */
+ qed_dbg_grc_init_params(p_hwfn);
+
dev_data->initialized = true;
return DBG_STATUS_OK;
}
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
int printed_chars;
u32 offset = 0;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
/* Read FW image/version from PRAM in a non-reset SEMI */
bool found = false;
u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
bool dump,
u8 num_specific_global_params)
{
+ u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
/* Find platform string and dump global params section header */
offset += qed_dump_section_hdr(dump_buf + offset,
- dump,
- "global_params",
- NUM_COMMON_GLOBAL_PARAMS +
- num_specific_global_params);
+ dump, "global_params", num_params);
/* Store params */
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
}
}
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
- return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- if (!dev_data->grc.param_set_by_user[i])
- dev_data->grc.param_val[i] =
- s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
/* Returns true if the specified entity (indicated by GRC param) should be
* included in the dump, false otherwise.
*/
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
}
}
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
{
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
* The following parameters are dumped:
* - 'count' = num_dumped_entries
* - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
* - 'param_name' = param_val (user param, dumped only if param_name != NULL and
* param_val != NULL)
*/
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
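+	/* When dump is false, only the dump size is computed */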
+ if (dump)
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ else
+ offset += len;
+ return offset;
+}
+
+/* Dumps a GRC register sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+ u32 len)
+{
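+	/* The header dword packs the GRC address with the length in its high bits */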
+ if (dump)
+ *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+ return 1;
+}
+
+/* Dumps a GRC register sequence. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf,
bool dump, u32 addr, u32 len)
{
- u32 offset = 0, i;
+ u32 offset = 0;
+
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
+ return offset;
+}
+
+/* Dumps a GRC register sequence with a skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 total_len,
+ u32 read_len, u32 skip_len)
+{
+ u32 offset = 0, reg_offset = 0;
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
if (dump) {
- *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
- for (i = 0; i < len; i++, addr++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- DWORDS_TO_BYTES(addr));
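+		/* Read up to read_len dwords at a time, zero-filling the skipped dwords */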
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32,
+ read_len,
+ total_len - reg_offset);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len);
+ reg_offset += curr_len;
+ addr += curr_len;
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32,
+ skip_len,
+ total_len - skip_len);
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
+ reg_offset += curr_len;
+ addr += curr_len;
+ }
+ }
} else {
- offset += len + 1;
+ offset += total_len;
}
return offset;
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
&input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ addr = GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset, dump,
- GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS),
- GET_FIELD(reg->data,
- DBG_DUMP_REG_LENGTH));
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
(*num_dumped_reg_entries)++;
}
} else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct chip_platform_defs *p_platform_defs;
u32 offset = 0, input_offset = 0;
- u8 port_id, pf_id;
+ struct chip_defs *p_chip_defs;
+ u8 port_id, pf_id, vf_id;
+ u16 fid;
+
+ p_chip_defs = &s_chip_defs[dev_data->chip_id];
+ p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
switch (split_type_id) {
case SPLIT_TYPE_NONE:
- case SPLIT_TYPE_VF:
offset += qed_grc_dump_split_data(p_hwfn,
p_ptt,
curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
break;
case SPLIT_TYPE_PORT:
- for (port_id = 0;
- port_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_ports;
+ for (port_id = 0; port_id < p_platform_defs->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
break;
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0;
- pf_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_pfs;
+ for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
pf_id++) {
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, pf_id);
- offset += qed_grc_dump_split_data(p_hwfn,
- p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump, block_enable,
- "pf", pf_id, param_name,
- param_val);
+ u8 pfid_shift =
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
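+				/* Pretend to this PF so its per-PF registers are read */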
+ if (dump) {
+ fid = pf_id << pfid_shift;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_VF:
+ for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ vf_id++) {
+ u8 vfvalid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+ u8 vfid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
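+				/* Pretend using a concrete FID with the VF-valid bit set */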
+ if (dump) {
+ fid = BIT(vfvalid_shift) |
+ (vf_id << vfid_shift);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "vf", vf_id,
+ param_name,
+ param_val);
}
break;
default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
}
/* Pretend to original PF */
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ if (dump) {
+ fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
return offset;
}
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs
- [i].addr), 1);
+ addr,
+ 1);
num_regs++;
}
}
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
&attn_reg_arr[reg_idx];
u16 modes_buf_offset;
bool eval_mode;
+ u32 addr;
/* Check mode */
eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
/* Mode match - read and dump registers */
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- reg_data->mask_address,
- 1);
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS),
- 1);
+ addr = reg_data->mask_address;
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries += 2;
}
}
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Write storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 addr;
+
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
dump)
continue;
+ addr =
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- BYTES_TO_DWORDS(s_storm_defs[storm_id].
- sem_fast_mem_addr +
- SEM_FAST_REG_STALLED),
- 1);
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries++;
}
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, addr;
+
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ dump, 2, "eng", -1, NULL, NULL);
+
+ /* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register should be
+ * skipped).
+ */
+ addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+ offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+ addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+ offset +=
+ qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+
+ return offset;
+}
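
The new qed_grc_dump_reg_entry_skip() call above reads a block of registers while periodically skipping some of them (read 7 dwords, skip 1, per the comment). A minimal standalone sketch of that access pattern, with a stubbed register read standing in for qed_rd() and the read/skip semantics assumed from the call site:

#include <stdint.h>
#include <stdio.h>

/* Stub register read; echoes the dword address so the pattern is visible. */
static uint32_t stub_rd(uint32_t dword_addr)
{
	return dword_addr;
}

/* Walk `len` source dwords starting at `addr`, keeping `read_count` dwords
 * out of every (read_count + skip_count) window. Returns dwords copied.
 */
static uint32_t read_with_skip(uint32_t *buf, uint32_t addr, uint32_t len,
			       uint32_t read_count, uint32_t skip_count)
{
	uint32_t i, copied = 0;

	for (i = 0; i < len; i++)
		if (i % (read_count + skip_count) < read_count)
			buf[copied++] = stub_rd(addr + i);

	return copied;
}

int main(void)
{
	uint32_t buf[16];
	uint32_t n = read_with_skip(buf, 0x100, 16, 7, 1);

	printf("copied %u of 16 dwords\n", n);	/* 14: every 8th skipped */
	return 0;
}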
+
/* Dumps a GRC memory header (section and params).
* The following parameters are dumped:
* name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
* width - bit_width is dumped if it's not zero.
* packed - packed=1 is dumped if it's not false.
* mem_group - mem_group is always dumped.
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 offset = 0;
char buf[64];
- if (!dword_len)
+ if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from %s...\n",
- dword_len, buf);
+ len, buf);
} else {
/* Dump address */
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr", byte_addr);
- if (dump && dword_len > 64)
+ dump, "addr",
+ DWORDS_TO_BYTES(addr));
+ if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- dword_len, byte_addr);
+ len, (u32)DWORDS_TO_BYTES(addr));
}
/* Dump len */
- offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
/* Dump bit width */
if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
name,
- byte_addr,
- dword_len,
+ addr,
+ len,
bit_width,
packed,
mem_group, is_storm, storm_letter);
- if (dump) {
- u32 i;
-
- for (i = 0; i < dword_len;
- i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- } else {
- offset += dword_len;
- }
-
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
return offset;
}
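
Much of this hunk converts the dump helpers from byte addresses (byte_addr) to dword addresses (addr). A standalone sketch of the conversion macros, assuming the usual 4-byte dword that the driver's BYTES_TO_DWORDS()/DWORDS_TO_BYTES() names imply:

#include <assert.h>
#include <stdint.h>

#define BYTES_IN_DWORD 4
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)

int main(void)
{
	uint32_t byte_addr = 0x1000;
	uint32_t dword_addr = BYTES_TO_DWORDS(byte_addr);	/* 0x400 */

	/* Round-trips exactly for dword-aligned addresses. */
	assert(DWORDS_TO_BYTES(dword_addr) == byte_addr);
	return 0;
}

With this convention, callers such as qed_grc_dump_mem() now take dword units throughout, and the byte form only surfaces at the edges, e.g. the "addr" dump param above is emitted via DWORDS_TO_BYTES(addr).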
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
if (qed_grc_is_mem_included(p_hwfn,
(enum block_id)cond_hdr->block_id,
mem_group_id)) {
- u32 mem_byte_addr =
- DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS));
+ u32 mem_addr = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS);
u32 mem_len = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_LENGTH);
+ enum dbg_grc_params grc_param;
char storm_letter = 'a';
bool is_storm = false;
/* Update memory length for CCFC/TCFC memories
* according to number of LCIDs/LTIDs.
*/
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS)
- * (mem_len / MAX_LCIDS);
- else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id ==
+ MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS)
- * (mem_len / MAX_LTIDS);
+ grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
/* If memory is associated with Storm, update
* Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn, p_ptt,
dump_buf + offset, dump, NULL,
- mem_byte_addr, mem_len, 0,
+ mem_addr, mem_len, 0,
false,
s_mem_group_names[mem_group_id],
is_storm, storm_letter);
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE +
- DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+ struct storm_defs *storm = &s_storm_defs[storm_id];
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- 32,
- false,
- "ior",
- true,
- s_storm_defs
- [storm_id].letter);
- }
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 dwords, addr;
+
+ dwords = storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE;
+ addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ storm->letter);
}
}
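
The set loop above reuses a single name buffer and overwrites its last character with the set digit (buf[strlen(buf) - 1] = '0' + set_id). A tiny standalone sketch of that naming trick, with illustrative buffer contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = "IOR_?";	/* '?' is a placeholder for the set digit */
	int set_id;

	for (set_id = 0; set_id < 2; set_id++) {
		buf[strlen(buf) - 1] = '0' + set_id;
		printf("%s\n", buf);	/* IOR_0, IOR_1 */
	}
	return 0;
}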
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_size = (num_entries * entry_width) / 32;
+ u32 total_dwords = (num_entries * entry_width) / 32;
+ u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
bool packed = (entry_width == 16);
- u32 addr = rss_defs->addr;
- u32 i, j;
+ u32 rss_addr = rss_defs->addr;
+ u32 i, addr;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
rss_defs->mem_name,
- addr,
- total_size,
+ 0,
+ total_dwords,
entry_width,
packed,
rss_defs->type_name, false, 0);
if (!dump) {
- offset += total_size;
+ offset += total_dwords;
continue;
}
/* Dump RSS data */
- for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
- qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
- for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
- *(dump_buf + offset) =
- qed_rd(p_hwfn, p_ptt,
- RSS_REG_RSS_RAM_DATA +
- DWORDS_TO_BYTES(j));
+ for (i = 0; i < total_dwords;
+ i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ addr,
+ size);
}
}
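
The RSS RAM is not flat GRC space: each line is selected by writing RSS_REG_RSS_RAM_ADDR and read back through the RSS_REG_RSS_RAM_DATA window, RSS_REG_RSS_RAM_DATA_SIZE dwords at a time. A standalone sketch of this address/data indirection, with stubbed register accessors and an assumed 4-dword data window:

#include <stdint.h>
#include <string.h>

#define RAM_LINES	8
#define DATA_WINDOW	4	/* dwords per line; assumed for illustration */

static uint32_t ram[RAM_LINES][DATA_WINDOW];	/* the device-side RAM */
static uint32_t addr_reg;			/* latched line select */

static void stub_wr_addr(uint32_t line)  { addr_reg = line; }
static uint32_t stub_rd_data(uint32_t i) { return ram[addr_reg][i]; }

/* Read total_dwords from the RAM: one address write per line, then the
 * whole data window, mirroring the rss_addr++ loop in the hunk above.
 */
static void ram_dump(uint32_t *buf, uint32_t total_dwords)
{
	uint32_t line, i, off = 0;

	for (line = 0; off < total_dwords; line++) {
		stub_wr_addr(line);
		for (i = 0; i < DATA_WINDOW && off < total_dwords; i++)
			buf[off++] = stub_rd_data(i);
	}
}

int main(void)
{
	uint32_t buf[RAM_LINES * DATA_WINDOW];

	memset(ram, 0xab, sizeof(ram));
	ram_dump(buf, RAM_LINES * DATA_WINDOW);
	return 0;
}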
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 total_blocks, ram_size, offset = 0, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
- u32 ram_size, total_blocks;
- u32 offset = 0, i, j;
+ struct big_ram_defs *big_ram;
- total_blocks =
- s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ big_ram = &s_big_ram_defs[big_ram_id];
+ total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
- strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
- strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(type_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
+ strncpy(mem_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
/* Read and dump Big RAM data */
for (i = 0; i < total_blocks / 2; i++) {
- qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
- i);
- for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
- s_big_ram_defs[big_ram_id].
- data_reg_addr +
- DWORDS_TO_BYTES(j));
+ u32 addr, len;
+
+ qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+ addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+ len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
bool block_enable[MAX_BLOCK_ID] = { 0 };
+ u32 offset = 0, addr;
bool halted = false;
- u32 offset = 0;
/* Halt MCP */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_SCRATCH,
+ BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
0, false, "MCP", false, 0);
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_CPU_REG_FILE,
+ BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
0, false, "MCP", false, 0);
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, "eng", -1, "block", "MCP");
+ addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (MISC_REG_SHARED_MEM_ADDR), 1);
+ addr,
+ 1);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
{
u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id, addr, i;
+ u32 offset = 0, block_id, line_id;
struct block_defs *p_block_defs;
if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
if (dump && !dev_data->block_in_reset[block_id]) {
u8 dbg_client_id =
p_block_defs->dbg_client_id[dev_data->chip_id];
+ u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ u32 len = STATIC_DEBUG_LINE_DWORDS;
/* Enable block's client */
qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
0xf, 0, 0, 0);
/* Read debug line info */
- for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
- i < STATIC_DEBUG_LINE_DWORDS;
- i++, offset++, addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
- addr);
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
/* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
u8 i, port_mode = 0;
u32 offset = 0;
- /* Check if emulation platform */
*num_dumped_dwords = 0;
- /* Fill GRC parameters that were not set by the user with their default
- * value.
- */
- qed_dbg_grc_set_params_default(p_hwfn);
-
/* Find port mode */
if (dump) {
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
}
/* Disable all parities using MFW command */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
if (qed_grc_get_param
(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
- else
- DP_NOTICE(p_hwfn,
- "Failed to mask parities using MFW\n");
}
}
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset,
dump,
block_enable, NULL, NULL);
+
+ /* Dump special registers */
+ offset += qed_grc_dump_special_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
}
/* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
- u32 grc_addr =
- DWORDS_TO_BYTES(GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS));
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
/* Write register header */
struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
memset(reg_hdr, 0, sizeof(*reg_hdr));
reg_hdr->size = reg->size;
SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
/* Write register values */
- for (i = 0; i < reg->size;
- i++, offset++, grc_addr += 4)
- dump_buf[offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
- }
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size);
}
+ }
}
return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
- u32 i, j, offset = 0;
+ u32 i, offset = 0;
u16 entry_id;
u8 reg_id;
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
/* Read current entry of all condition registers */
- if (dump) {
- u32 next_reg_offset = 0;
-
- for (reg_id = 0;
- reg_id < rule->num_cond_regs;
- reg_id++) {
- const struct dbg_idle_chk_cond_reg
- *reg = &cond_regs[reg_id];
-
- /* Find GRC address (if it's a memory,
- * the address of the specific entry is
- * calculated).
- */
- u32 grc_addr =
- DWORDS_TO_BYTES(
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS));
-
- if (reg->num_entries > 1 ||
- reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two
- (reg->entry_size) : 1;
-
- grc_addr +=
- DWORDS_TO_BYTES(
- (reg->start_entry +
- entry_id)
- * padded_entry_size);
- }
+ u32 next_reg_offset = 0;
- /* Read registers */
- if (next_reg_offset + reg->entry_size >=
- IDLE_CHK_MAX_ENTRIES_SIZE) {
- DP_NOTICE(p_hwfn,
- "idle check registers entry is too large\n");
- return 0;
- }
+ for (reg_id = 0; reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg =
+ &cond_regs[reg_id];
- for (j = 0; j < reg->entry_size;
- j++, next_reg_offset++,
- grc_addr += 4)
- cond_reg_values[next_reg_offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
+ /* Find GRC address (if it's a memory, the
+ * address of the specific entry is calculated).
+ */
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
+
+ addr += (reg->start_entry + entry_id) *
+ padded_entry_size;
}
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ next_reg_offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ cond_reg_values +
+ next_reg_offset,
+ dump, addr,
+ reg->entry_size);
}
/* Call rule's condition function - a return value of
* true indicates failure.
*/
if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values) || !dump) {
+ imm_values)) {
offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct mcp_file_att file_att;
/* Call NVRAM get file command */
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type, &ret_mcp_resp, &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att) != 0)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att);
/* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ if (nvm_result ||
+ (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
u32 running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- enum dbg_status status;
u32 nvram_image_type;
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
- status = qed_find_nvram_image(p_hwfn,
- p_ptt,
- nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
-
- return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
- u32 i;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
- size_in_dwords, grc_addr);
- for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
- buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+ return qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
}
/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
- u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
- u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
+ bool mcp_access;
int halted = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
*num_dumped_dwords = 0;
/* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
* consistent if halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump) {
+ if (dump && mcp_access) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "size", trace_data_size_dwords);
/* Read trace data from scratchpad into dump buffer */
- if (dump)
- qed_mcp_trace_read_data(p_hwfn,
- p_ptt,
- trace_data_grc_addr,
- trace_data_size_dwords,
- dump_buf + offset);
- offset += trace_data_size_dwords;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(trace_data_grc_addr),
+ trace_data_size_dwords);
/* Resume MCP (only if halt succeeded) */
if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "mcp_trace_meta", 1);
/* Read trace meta info */
- status = qed_mcp_trace_get_meta_info(p_hwfn,
- p_ptt,
- trace_data_size_bytes,
- &running_bundle_id,
- &trace_meta_offset_bytes,
- &trace_meta_size_bytes);
- if (status != DBG_STATUS_OK)
- return status;
+ if (mcp_access) {
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords =
+ BYTES_TO_DWORDS(trace_meta_size_bytes);
+ }
- /* Dump trace meta size param (trace_meta_size_bytes is always
- * dword-aligned).
- */
- trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- trace_meta_size_dwords);
+ /* Dump trace meta size param */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_meta_size_dwords);
/* Read trace meta image into dump buffer */
- if (dump) {
+ if (dump && trace_meta_size_dwords)
status = qed_mcp_trace_read_meta(p_hwfn,
- p_ptt,
- trace_meta_offset_bytes,
- trace_meta_size_bytes,
- dump_buf + offset);
- if (status != DBG_STATUS_OK)
- return status;
- }
-
- offset += trace_meta_size_dwords;
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status == DBG_STATUS_OK)
+ offset += trace_meta_size_dwords;
*num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ /* If no mcp access, indicate that the dump doesn't contain the meta
+ * data from NVRAM.
+ */
+ return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
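
Like most dump routines in this file, qed_mcp_trace_dump() is written to run twice: once with dump=false, where every helper only advances offset so the caller learns the required buffer size, and once with dump=true, where data is actually written. A minimal standalone sketch of that two-pass contract (names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Writes `len` dwords when dump is true; always accounts for them. */
static uint32_t dump_section(uint32_t *buf, int dump, uint32_t len)
{
	uint32_t i;

	if (dump)
		for (i = 0; i < len; i++)
			buf[i] = i;	/* stand-in for real register data */

	return len;	/* offset advances in both passes */
}

static uint32_t do_dump(uint32_t *buf, int dump)
{
	uint32_t offset = 0;

	offset += dump_section(dump ? buf + offset : NULL, dump, 8);
	offset += dump_section(dump ? buf + offset : NULL, dump, 4);
	return offset;
}

int main(void)
{
	uint32_t size = do_dump(NULL, 0);	/* size pass: buf untouched */
	uint32_t *buf = malloc(size * sizeof(*buf));

	do_dump(buf, 1);			/* dump pass */
	printf("dumped %u dwords\n", size);
	free(buf);
	return 0;
}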
/* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct fw_asserts_ram_section *asserts;
char storm_letter_str[2] = "?";
struct fw_info fw_info;
- u32 offset = 0, i;
+ u32 offset = 0;
u8 storm_id;
/* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
- last_list_idx, element_addr;
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ u32 last_list_idx, addr;
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Read FW info for the current Storm */
qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+ asserts = &fw_info.fw_asserts_section;
+
/* Dump FW Asserts section header and params */
storm_letter_str[0] = s_storm_defs[storm_id].letter;
offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
storm_letter_str);
offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- fw_info.fw_asserts_section.
- list_element_dword_size);
+ asserts->list_element_dword_size);
if (!dump) {
- offset += fw_info.fw_asserts_section.
- list_element_dword_size;
+ offset += asserts->list_element_dword_size;
continue;
}
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
fw_asserts_section_addr =
s_storm_defs[storm_id].sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
- RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
- section_ram_line_offset);
+ RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
next_list_idx_addr =
fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_next_index_dword_offset);
+ DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
? next_list_idx
- : fw_info.fw_asserts_section.list_num_elements)
- - 1;
- element_addr =
- fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_dword_offset) +
- last_list_idx *
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_element_dword_size);
- for (i = 0;
- i < fw_info.fw_asserts_section.list_element_dword_size;
- i++, offset++, element_addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ : asserts->list_num_elements) - 1;
+ addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+ asserts->list_dword_offset +
+ last_list_idx * asserts->list_element_dword_size;
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, addr,
+ asserts->list_element_dword_size);
}
/* Dump last section */
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
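
The rewritten loader above stops trusting a leading buffer-count dword and instead walks a fixed MAX_BIN_DBG_BUFFER_TYPE header entries. A standalone sketch of that parse, assuming a two-field header (byte offset plus byte length; the length field name is inferred, since the hunk elides it):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BIN_DBG_BUFFER_TYPE 3	/* illustrative; real value is larger */

struct bin_buffer_hdr {
	uint32_t offset;	/* byte offset of the buffer in the blob */
	uint32_t length;	/* byte length (assumed field name) */
};

int main(void)
{
	uint8_t blob[64] = { 0 };
	struct bin_buffer_hdr hdrs[MAX_BIN_DBG_BUFFER_TYPE] = {
		{ sizeof(hdrs),      8 },
		{ sizeof(hdrs) + 8,  4 },
		{ sizeof(hdrs) + 12, 4 },
	};
	uint8_t buf_id;

	memcpy(blob, hdrs, sizeof(hdrs));	/* header table leads the blob */

	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
		struct bin_buffer_hdr hdr;

		memcpy(&hdr, blob + buf_id * sizeof(hdr), sizeof(hdr));
		printf("buf %u: ptr = blob + %u, size = %u dwords\n",
		       buf_id, hdr.offset, hdr.length / 4);
	}
	return 0;
}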
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
- /* Clear all GRC params */
- qed_dbg_grc_clear_params(p_hwfn);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return status;
}
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
/* Idle Check Dump */
*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return DBG_STATUS_OK;
}
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ /* validate buffer size */
+ status =
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
&needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
+ if (status != DBG_STATUS_OK &&
+ status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Perform dump */
- return qed_mcp_trace_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+ status = qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_reg_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_igu_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_protection_override_dump(p_hwfn,
- p_ptt,
- dump_buf, true, num_dumped_dwords);
+
+ status = qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf,
+ true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
REG_FIFO_ELEMENT_ADDR_FACTOR,
s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
- GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PORT),
s_privilege_strs[GET_FIELD(elements[i].
data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
i, address,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WRITE),
s_protection_strs[GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
*/
rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
&buf_size_dwords);
- if (rc != DBG_STATUS_OK)
+ if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e518f914eab1..5f31140d0b77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -75,7 +75,8 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -84,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
- val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -182,199 +183,573 @@ void qed_resc_free(struct qed_dev *cdev)
}
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
- qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ qed_dcbx_info_free(p_hwfn);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
+/******************** QM initialization *******************/
+#define ACTIVE_TCS_BMAP 0x9f
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
- struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct init_qm_port_params *p_qm_port;
- bool init_rdma_offload_pq = false;
- bool init_pure_ack_pq = false;
- bool init_ooo_pq = false;
- u16 num_pqs, multi_cos_tcs = 1;
- u8 pf_wfq = qm_info->pf_wfq;
- u32 pf_rl = qm_info->pf_rl;
- u16 num_pf_rls = 0;
- u16 num_vfs = 0;
-
-#ifdef CONFIG_QED_SRIOV
- if (p_hwfn->cdev->p_iov_info)
- num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
-#endif
- memset(qm_info, 0, sizeof(*qm_info));
+ u32 flags;
- num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
- num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+ /* common flags */
+ flags = PQ_FLAGS_LB;
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
- num_pqs++; /* for RoCE queue */
- init_rdma_offload_pq = true;
- /* we subtract num_vfs because each require a rate limiter,
- * and one default rate limiter
- */
- if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
- num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+ /* feature flags */
+ if (IS_QED_SRIOV(p_hwfn->cdev))
+ flags |= PQ_FLAGS_VFS;
- num_pqs += num_pf_rls;
- qm_info->num_pf_rls = (u8) num_pf_rls;
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case QED_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ break;
+ default:
+ DP_ERR(p_hwfn,
+ "unknown personality %d\n", p_hwfn->hw_info.personality);
+ return 0;
}
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+ return flags;
+}
- /* Sanity checking that setup requires legal number of resources */
- if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
- DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x when only %04x are available\n",
- num_pqs, RESC_NUM(p_hwfn, QED_PQ));
- return -EINVAL;
- }
+/* Getters for resource amounts necessary for qm initialization */
+u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
- /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
- */
- qm_info->qm_pq_params = kcalloc(num_pqs,
- sizeof(struct init_qm_pq_params),
- b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
- if (!qm_info->qm_pq_params)
- goto alloc_err;
+u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+{
+ return IS_QED_SRIOV(p_hwfn->cdev) ?
+ p_hwfn->cdev->p_iov_info->total_vfs : 0;
+}
- qm_info->qm_vport_params = kcalloc(num_vports,
- sizeof(struct init_qm_vport_params),
- b_sleepable ? GFP_KERNEL
- : GFP_ATOMIC);
- if (!qm_info->qm_vport_params)
- goto alloc_err;
+#define NUM_DEFAULT_RLS 1
- qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
- sizeof(struct init_qm_port_params),
- b_sleepable ? GFP_KERNEL
- : GFP_ATOMIC);
- if (!qm_info->qm_port_params)
- goto alloc_err;
+u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
- qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
- b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
- if (!qm_info->wfq_data)
- goto alloc_err;
+ /* num RLs can't exceed the number of available RLs or vports */
+ num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
+
+ /* Make sure after we reserve there's something left */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
+ return 0;
- vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
- /* First init rate limited queues */
- for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- }
+ return num_pf_rls;
+}
- /* First init per-TC PQs */
- for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params =
- &qm_info->qm_pq_params[curr_queue++];
+u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == QED_PCI_ETH) {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
- } else {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.offload_tc;
- params->wrr_group = 1;
- }
- }
+ /* all pqs share the same vport, except for vfs and pf_rl pqs */
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ qed_init_qm_get_num_vfs(p_hwfn) + 1;
+}
- /* Then init pure-LB PQ */
- qm_info->pure_lb_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id =
- (u8) RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
-
- qm_info->offload_pq = 0;
- if (init_rdma_offload_pq) {
- qm_info->offload_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_pure_ack_pq) {
- qm_info->pure_ack_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_ooo_pq) {
- qm_info->ooo_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- /* Then init per-VF PQs */
- vf_offset = curr_queue;
- for (i = 0; i < num_vfs; i++) {
- /* First vport is used by the PF */
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- curr_queue++;
- }
-
- qm_info->vf_queues_offset = vf_offset;
- qm_info->num_pqs = num_pqs;
- qm_info->num_vports = num_vports;
+/* calc amount of PQs according to the requested flags */
+u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ qed_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
+}
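
The PQ count is a pure function of the flag word: each single-bit feature contributes !!(flag) queues, and the multi-queue features scale by their counts. A standalone worked example using assumed bit values for the PQ_FLAGS_* constants (the real values live in the qed headers) and an iSCSI-style flag set:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit assignments, for illustration only. */
#define PQ_FLAGS_RLS	(1 << 0)
#define PQ_FLAGS_MCOS	(1 << 1)
#define PQ_FLAGS_LB	(1 << 2)
#define PQ_FLAGS_OOO	(1 << 3)
#define PQ_FLAGS_ACK	(1 << 4)
#define PQ_FLAGS_OFLD	(1 << 5)
#define PQ_FLAGS_VFS	(1 << 6)
#define PQ_FLAGS_LLT	(1 << 7)

static uint16_t num_pqs(uint32_t pq_flags, uint16_t num_tcs,
			uint16_t num_vfs, uint16_t num_pf_rls)
{
	return (!!(PQ_FLAGS_RLS & pq_flags)) * num_pf_rls +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) * num_tcs +
	       (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * num_vfs;
}

int main(void)
{
	/* iSCSI personality: LB + ACK + OOO + OFLD (+ VFS when SR-IOV). */
	uint32_t flags = PQ_FLAGS_LB | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
			 PQ_FLAGS_OFLD | PQ_FLAGS_VFS;

	/* 1 lb + 1 ack + 1 ooo + 1 ofld + 16 vf pqs = 20 */
	printf("num_pqs = %u\n", num_pqs(flags, 8, 16, 0));
	return 0;
}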
+
+/* initialize the top level QM params */
+static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
+
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
+
+ /* unless MFW indicated otherwise, ooo_tc == 3 for
+ * AH 4-port and 4 otherwise.
+ */
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
+{
/* Initialize qm port parameters */
- num_ports = p_hwfn->cdev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 :
+ ACTIVE_TCS_BMAP;
+
for (i = 0; i < num_ports; i++) {
- p_qm_port = &qm_info->qm_port_params[i];
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
p_qm_port->active = 1;
- if (num_ports == 4)
- p_qm_port->active_phys_tcs = 0x7;
- else
- p_qm_port->active_phys_tcs = 0x9f;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resource accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or not
+ * (i.e. vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+ struct qed_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn,
+ "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
+}
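
qed_init_qm_pq() couples queue creation with resource accounting: the new PQ points at the current vport, and the vport counter only advances when the caller did not ask to share. A reduced standalone sketch of that accounting, with the structure trimmed to the fields the accounting touches:

#include <stdint.h>
#include <stdio.h>

#define PQ_INIT_SHARE_VPORT	(1 << 0)
#define PQ_INIT_PF_RL		(1 << 1)

struct mini_qm_info {
	uint16_t num_pqs;
	uint16_t num_vports;
	uint16_t num_pf_rls;
	uint8_t start_vport;
};

static void pq_init(struct mini_qm_info *qm, uint32_t flags)
{
	/* vport_id the new PQ gets */
	uint8_t vport_id = qm->start_vport + qm->num_vports;

	printf("pq %u -> vport %u\n", (unsigned)qm->num_pqs,
	       (unsigned)vport_id);

	qm->num_pqs++;
	if (!(flags & PQ_INIT_SHARE_VPORT))
		qm->num_vports++;	/* next PQ gets a fresh vport */
	if (flags & PQ_INIT_PF_RL)
		qm->num_pf_rls++;
}

int main(void)
{
	struct mini_qm_info qm = { .start_vport = 2 };

	pq_init(&qm, PQ_INIT_SHARE_VPORT);	/* pq 0 -> vport 2 */
	pq_init(&qm, PQ_INIT_SHARE_VPORT);	/* pq 1 -> vport 2 */
	pq_init(&qm, 0);			/* pq 2 -> vport 2, then advance */
	pq_init(&qm, PQ_INIT_PF_RL);		/* pq 3 -> vport 3, rl count++ */
	return 0;
}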
+
+/* get pq index according to PQ_FLAGS */
+static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (bitmap_weight((unsigned long *)&pq_flags,
+ sizeof(pq_flags) * BITS_PER_BYTE) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_LLT:
+ return &qm_info->low_latency_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return NULL;
+}
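
The index getter refuses combined flags. With a 32-bit flag word, a popcount is the simplest way to express "exactly one bit set"; a standalone sketch of that check, using GCC/Clang's __builtin_popcount in place of the kernel's bitmap helpers:

#include <stdint.h>
#include <stdio.h>

static int single_flag(uint32_t pq_flags)
{
	return __builtin_popcount(pq_flags) == 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       single_flag(1 << 3),			/* 1: one flag */
	       single_flag((1 << 3) | (1 << 5)),	/* 0: two flags */
	       single_flag(0));				/* 0: no flag */
	return 0;
}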
+
+/* save pq index in qm info */
+static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
+/* Functions for creating specific types of pqs */
+static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+ return;
- qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
- qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
qm_info->num_vf_pqs = num_vfs;
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ qed_init_qm_pq(p_hwfn,
+ qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
+}
- for (i = 0; i < qm_info->num_vports; i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->vport_rl_en = 1;
- qm_info->vport_wfq_en = 1;
- qm_info->pf_rl = pf_rl;
- qm_info->pf_wfq = pf_wfq;
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+}
+
+static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ qed_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ qed_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ qed_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ qed_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ qed_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ qed_init_qm_offload_pq(p_hwfn);
+
+ /* low latency pq */
+ qed_init_qm_low_latency_pq(p_hwfn);
+
+ /* done sharing vports */
+ qed_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ qed_init_qm_vf_pqs(p_hwfn);
+}
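
Because each helper appends at qm_info->num_pqs and records its base index first, the call order in qed_init_qm_pq_params() is the PQ memory layout: rate-limited PQs first (a firmware assumption, per the comment), then mcos, lb, ooo, ack, ofld, llt, and finally the VF PQs. A standalone sketch printing the layout this ordering would produce for an assumed configuration:

#include <stdio.h>

int main(void)
{
	/* assumed counts: 2 PF RLs, 4 TCs, 3 VFs; single-PQ features present */
	const struct { const char *name; int count; } groups[] = {
		{ "rl",   2 }, { "mcos", 4 }, { "lb",  1 }, { "ooo", 1 },
		{ "ack",  1 }, { "ofld", 1 }, { "llt", 1 }, { "vf",  3 },
	};
	int g, i, pq = 0;

	for (g = 0; g < (int)(sizeof(groups) / sizeof(groups[0])); g++)
		for (i = 0; i < groups[g].count; i++)
			printf("pq %2d: %s[%d]\n", pq++, groups[g].name, i);
	return 0;
}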
+
+/* compare values of getters against resources amounts */
+static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
+{
+ if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return -EINVAL;
+ }
+
+ if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
+ }
return 0;
+}
-alloc_err:
- qed_qm_info_free(p_hwfn);
- return -ENOMEM;
+static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq,
+ qm_info->start_vport,
+ qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq,
+ qm_info->first_vf_pq,
+ qm_info->num_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->num_vports, qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en,
+ qm_info->pf_wfq_en,
+ qm_info->vport_rl_en,
+ qm_info->vport_wfq_en,
+ qm_info->pf_wfq,
+ qm_info->pf_rl,
+ qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+ port = &(qm_info->qm_port_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+ i,
+ port->active,
+ port->active_phys_tcs,
+ port->num_pbf_cmd_lines,
+ port->num_btb_blocks, port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &(qm_info->qm_vport_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i,
+ vport->vport_rl, vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "%d ", vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &(qm_info->qm_pq_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i,
+ pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ qed_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ qed_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ qed_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ qed_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ qed_init_qm_pq_params(p_hwfn);
+
+ /* display the resulting QM params */
+ qed_dp_init_qm_params(p_hwfn);
}
/* This function reconfigures the QM pf on the fly.
@@ -391,17 +766,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
bool b_rc;
int rc;
- /* qm_info is allocated in qed_init_qm_info() which is already called
- * from qed_resc_alloc() or previous call of qed_qm_reconf().
- * The allocated size may change each init, so we free it before next
- * allocation.
- */
- qed_qm_info_free(p_hwfn);
-
/* initialize qed's qm data structure */
- rc = qed_init_qm_info(p_hwfn, false);
- if (rc)
- return rc;
+ qed_init_qm_info(p_hwfn);
/* stop PF's qm queues */
spin_lock_bh(&qm_lock);
@@ -415,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- qed_qm_init_pf(p_hwfn);
+ qed_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -434,6 +800,47 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ int rc;
+
+ rc = qed_init_qm_sanity(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ qed_init_qm_get_num_pqs(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ qed_init_qm_get_num_vports(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ p_hwfn->cdev->num_ports_in_engines,
+ GFP_KERNEL);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) *
+ qed_init_qm_get_num_vports(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ qed_qm_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -442,8 +849,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
#endif
+ u32 rdma_tasks, excess_tasks;
struct qed_consq *p_consq;
struct qed_eq *p_eq;
+ u32 line_count;
int i, rc = 0;
if (IS_VF(cdev))
@@ -465,19 +874,44 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* Set the HW cid/tid numbers (in the context manager)
* Must be done prior to any further computations.
*/
- rc = qed_cxt_set_pf_params(p_hwfn);
+ rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
if (rc)
goto alloc_err;
- /* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn, true);
+ rc = qed_alloc_qm_data(p_hwfn);
if (rc)
goto alloc_err;
+ /* init qm info */
+ qed_init_qm_info(p_hwfn);
+
/* Compute the ILT client partition */
- rc = qed_cxt_cfg_ilt_compute(p_hwfn);
- if (rc)
- goto alloc_err;
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "too many ILT lines; re-computing with less lines\n");
+ /* In case there are not enough ILT lines we reduce the
+ * number of RDMA tasks and re-compute.
+ */
+ excess_tasks =
+ qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+ if (!excess_tasks)
+ goto alloc_err;
+
+ rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
+ rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "failed ILT compute. Requested too many lines: %u\n",
+ line_count);
+
+ goto alloc_err;
+ }
+ }
/* CID map / ILT shadow table / T2
* The table sizes are determined by the computations above
@@ -674,11 +1108,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_B0);
+ if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (QED_IS_AH(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+ p_hwfn->cdev->type);
+ return -EINVAL;
+ }
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -693,7 +1135,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
p_hwfn->cdev->num_ports_in_engines);
- return;
+ return -EINVAL;
}
switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +1161,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
"Configuring function for hw_mode: 0x%08x\n",
p_hwfn->hw_info.hw_mode);
+
+ return 0;
}
/* Init run time data for all PFs on an engine. */
@@ -748,16 +1192,67 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
}
}
+static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (L1_CACHE_BYTES > wr_mbs)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ L1_CACHE_BYTES, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+}
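
The two switches above are just log2 encodings: the WR_MBS register value 0/1/2 decodes to a maximum write size of 128/256/512 bytes, and a cache line of 32/64/128/256 bytes encodes back to the register values 0..3. A standalone sketch of that arithmetic, for illustration only (not driver code):

	#include <stdint.h>

	/* wr_mbs = 128 << reg_val: 0 -> 128, 1 -> 256, 2 -> 512 */
	static uint32_t wr_mbs_from_reg(uint32_t reg_val)
	{
		return 128u << reg_val;
	}

	/* reg_val = log2(cache_line) - log2(32): 32 -> 0, ... 256 -> 3 */
	static uint32_t cache_line_to_reg(uint32_t cache_line)
	{
		uint32_t val = 0;

		while (cache_line > 32) {
			cache_line >>= 1;
			val++;
		}
		return val;
	}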
+
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
- u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -784,17 +1279,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_cxt_hw_init_common(p_hwfn);
- /* Close gate from NIG to BRB/Storm; By default they are open, but
- * we close them to prevent NIG from passing data to reset blocks.
- * Should have been done in the ENGINE phase, but init-tool lacks
- * proper port-pretend capabilities.
- */
- qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
- qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- qed_port_unpretend(p_hwfn, p_ptt);
+ qed_init_cache_line_size(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
@@ -814,7 +1299,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -832,17 +1318,15 @@ static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
- u32 dpi_bit_shift, dpi_count;
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
+ u32 n_wids;
/* Calculate DPI size */
- dpi_page_size_1 = QED_WID_SIZE * n_cpus;
- dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
- dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
- dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
+ dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
+ dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
dpi_bit_shift = ilog2(dpi_page_size / 4096);
-
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
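
A worked example of the DPI sizing above, assuming QED_WID_SIZE is 1024 bytes and PAGE_SIZE is 4096 (both values are assumptions for illustration; the real ones come from the driver and architecture headers):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t roundup_pow_of_two_u32(uint32_t v)
	{
		uint32_t p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		uint32_t wid_size = 1024, page_size = 4096;
		uint32_t n_wids = 6;	/* six CPUs, already >= QED_MIN_WIDS */
		uint32_t dpi_page_size;

		/* 1024 * roundup_pow_of_two(6) = 1024 * 8 = 8192 */
		dpi_page_size = wid_size * roundup_pow_of_two_u32(n_wids);
		/* page-align, as in the driver; bit shift = ilog2(8192/4096) = 1 */
		dpi_page_size = (dpi_page_size + page_size - 1) & ~(page_size - 1);
		printf("dpi_page_size=%u\n", dpi_page_size);
		return 0;
	}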
@@ -870,13 +1354,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 pwm_regsize, norm_regsize;
u32 non_pwm_conn, min_addr_reg1;
- u32 db_bar_size, n_cpus;
+ u32 db_bar_size, n_cpus = 1;
u32 roce_edpm_mode;
u32 pf_dems_shift;
int rc = 0;
u8 cond;
- db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
if (p_hwfn->cdev->num_hwfns > 1)
db_bar_size /= 2;
@@ -931,6 +1415,8 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
+ p_hwfn->wid_count = (u16) n_cpus;
+
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
norm_regsize,
@@ -967,7 +1453,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -987,7 +1473,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl = 100000;
}
- qed_cxt_hw_init_pf(p_hwfn);
+ qed_cxt_hw_init_pf(p_hwfn, p_ptt);
qed_int_igu_init_rt(p_hwfn);
@@ -1095,25 +1581,47 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data)
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+ struct qed_drv_load_params *p_drv_load)
+{
+ memset(p_load_req, 0, sizeof(*p_load_req));
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
+static int qed_vf_start(struct qed_hwfn *p_hwfn,
+ struct qed_hw_init_params *p_params)
{
+ if (p_params->p_tunn) {
+ qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return 0;
+}
+
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+{
+ struct qed_load_req_params load_req_params;
u32 load_code, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
- if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
return -EINVAL;
}
if (IS_PF(cdev)) {
- rc = qed_init_fw_data(cdev, bin_fw_data);
+ rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
if (rc)
return rc;
}
@@ -1128,26 +1636,32 @@ int qed_hw_init(struct qed_dev *cdev,
}
if (IS_VF(cdev)) {
- p_hwfn->b_int_enabled = 1;
+ qed_vf_start(p_hwfn, p_params);
continue;
}
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- qed_calc_hw_mode(p_hwfn);
+ rc = qed_calc_hw_mode(p_hwfn);
+ if (rc)
+ return rc;
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+ qed_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
if (rc) {
- DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
return rc;
}
- qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+ load_code = load_req_params.load_code;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
@@ -1168,11 +1682,15 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn, p_hwfn->hw_info.hw_mode,
- b_hw_start, int_mode,
- allow_npar_tx_switch);
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected load code [0x%08x]", load_code);
rc = -EINVAL;
break;
}
@@ -1212,10 +1730,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
- drv_mb_param = (FW_MAJOR_VERSION << 24) |
- (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) |
- (FW_ENGINEERING_VERSION);
+ drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param);
@@ -1290,27 +1805,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
int qed_hw_stop(struct qed_dev *cdev)
{
- int rc = 0, t_rc;
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc, rc2 = 0;
int j;
for_each_hwfn(cdev, j) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &cdev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+
+ qed_slowpath_irq_sync(p_hwfn);
+
+ /* After this point no MFW attentions are expected; this prevents a
+ * race between PF stop and DCBx PF update.
+ */
rc = qed_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1333,34 +1874,54 @@ int qed_hw_stop(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt, false);
- if (t_rc != 0)
- rc = t_rc;
+ rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+ rc2 = -EINVAL;
+ }
}
- return rc;
+ return rc2;
}
-void qed_hw_stop_fastpath(struct qed_dev *cdev)
+int qed_hw_stop_fastpath(struct qed_dev *cdev)
{
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt;
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
@@ -1378,100 +1939,28 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
- }
-}
-
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
-{
- if (IS_VF(p_hwfn->cdev))
- return;
-
- /* Re-open incoming traffic */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
-}
-
-static int qed_reg_assert(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 reg, bool expected)
-{
- u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return -EINVAL;
+ qed_ptt_release(p_hwfn, p_ptt);
}
return 0;
}
-int qed_hw_reset(struct qed_dev *cdev)
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
- int rc = 0;
- u32 unload_resp, unload_param;
- u32 wol_param;
- int i;
-
- switch (cdev->wol_config) {
- case QED_OV_WOL_DISABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
- break;
- case QED_OV_WOL_ENABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
- break;
- default:
- DP_NOTICE(cdev,
- "Unknown WoL configuration %02x\n", cdev->wol_config);
- /* Fallthrough */
- case QED_OV_WOL_DEFAULT:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
- }
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
- if (IS_VF(cdev)) {
- rc = qed_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
-
- /* Disable PF in HW blocks */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- TCFC_REG_STRONG_ENABLE_PF, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- CCFC_REG_STRONG_ENABLE_PF, 0);
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
- /* Send unload command to MCP */
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ, wol_param,
- &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
- return rc;
- }
- }
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
- return rc;
+ return 0;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@@ -1485,10 +1974,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
/* clear indirect access */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+ } else {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1522,7 +2026,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_features = 1;
+ u32 non_l2_sbs = 0;
if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
@@ -1530,204 +2034,260 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
* the status blocks equally between L2 / RoCE but with
* consideration as to how many l2 queues / cnqs we have.
*/
- num_features++;
-
feat_num[QED_RDMA_CNQ] =
- min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
- }
- feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
- num_features,
- RESC_NUM(p_hwfn, QED_L2_QUEUE));
-
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- feat_num[QED_VF_L2_QUE] =
- min_t(u32,
- RESC_NUM(p_hwfn, QED_L2_QUEUE) -
- FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
+ non_l2_sbs = feat_num[QED_RDMA_CNQ];
+ }
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ /* Start by allocating VF queues, then PF's */
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[QED_VF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE),
+ sb_cnt_info.sb_iov_cnt);
+ feat_num[QED_PF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_SB) -
+ non_l2_sbs,
+ RESC_NUM(p_hwfn,
+ QED_L2_QUEUE) -
+ FEAT_NUM(p_hwfn,
+ QED_VF_L2_QUE));
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
- RESC_NUM(p_hwfn, QED_SB), num_features);
+ (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ RESC_NUM(p_hwfn, QED_SB));
}
-static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
- case QED_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
case QED_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case QED_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case QED_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case QED_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case QED_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case QED_MAC:
+ return "MAC";
case QED_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case QED_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case QED_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case QED_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case QED_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case QED_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ return "BDQ";
+ case QED_SB:
+ return "SB";
default:
- break;
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ int rc;
+
+ rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return 0;
+}
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ int rc;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ switch (res_id) {
+ case QED_LL2_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ /* No need for a case for QED_CMDQS_CQS since
+ * CNQ/CMDQS are the same resource.
+ */
+ resc_max_val = NUM_OF_CMDQS_CQS;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+ : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ break;
+ case QED_BDQ:
+ resc_max_val = BDQ_NUM_RESOURCES;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc)
+ return rc;
+
+ /* There's no point in continuing to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return -EINVAL;
}
- return mfw_res_id;
+ return 0;
}
-static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
- case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
- break;
case QED_L2_QUEUE:
- dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case QED_VPORT:
- dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
break;
case QED_RSS_ENG:
- dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case QED_PQ:
- /* The granularity of the PQs is 8 */
- dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
- dflt_resc_num &= ~0x7;
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case QED_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
+ *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
+ RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
break;
- default:
+ case QED_BDQ:
+ if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ *p_resc_num = 0;
+ else
+ *p_resc_num = 1;
break;
+ case QED_SB:
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ *p_resc_num = sb_cnt_info.sb_cnt;
+ break;
+ default:
+ return -EINVAL;
}
- return dflt_resc_num;
-}
-
-static const char *qed_hw_get_resc_name(enum qed_resources res_id)
-{
switch (res_id) {
- case QED_SB:
- return "SB";
- case QED_L2_QUEUE:
- return "L2_QUEUE";
- case QED_VPORT:
- return "VPORT";
- case QED_RSS_ENG:
- return "RSS_ENG";
- case QED_PQ:
- return "PQ";
- case QED_RL:
- return "RL";
- case QED_MAC:
- return "MAC";
- case QED_VLAN:
- return "VLAN";
- case QED_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case QED_ILT:
- return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
- case QED_CMDQS_CQS:
- return "CMDQS_CQS";
- case QED_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ *p_resc_start = p_hwfn->port_id + 2;
+ break;
default:
- return "UNKNOWN_RESOURCE";
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
}
+
+ return 0;
}
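
Apart from the BDQ and SB special cases, the default path above is a plain even split: each of the num_funcs PFs on the engine gets total / num_funcs entries, offset by its enabled-function index. A minimal sketch with illustrative numbers (the 160 is not necessarily any real chip limit):

	static void dflt_split_example(unsigned int total, unsigned int num_funcs,
				       unsigned int func_idx,
				       unsigned int *num, unsigned int *start)
	{
		*num = total / num_funcs;	/* e.g. 160 vports / 4 PFs = 40 */
		*start = *num * func_idx;	/* func_idx 2 starts at 80 */
	}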
-static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
int rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- /* Default values assumes that each function received equal share */
- dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
+ rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc) {
DP_ERR(p_hwfn,
"Failed to get default amount for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
- }
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
-
- memset(&resc_info, 0, sizeof(resc_info));
- resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d [%s] with the MFW resources\n",
- res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
+ return rc;
}
- rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc) {
DP_NOTICE(p_hwfn,
"MFW response failure for an allocation request for resource %d [%s]\n",
@@ -1740,13 +2300,12 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- DP_NOTICE(p_hwfn,
- "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
- res_id,
- qed_hw_get_resc_name(res_id),
- mcp_resp, dflt_resc_num, dflt_resc_start);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+ res_id,
+ qed_hw_get_resc_name(res_id),
+ mcp_resp, dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
@@ -1754,13 +2313,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
/* Special handling for status blocks; would be revised in the future */
if (res_id == QED_SB) {
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
-
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
out:
/* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit.
@@ -1778,19 +2333,80 @@ out:
return 0;
}
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
- u8 res_id;
int rc;
+ u8 res_id;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
- rc = qed_hw_set_resc_info(p_hwfn, res_id);
+ rc = __qed_hw_set_resc_info(p_hwfn, res_id);
if (rc)
return rc;
}
+ return 0;
+}
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params resc_unlock_params;
+ struct qed_resc_lock_params resc_lock_params;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u8 res_id;
+ int rc;
+
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+ * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ */
+ qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ QED_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (!rc && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ return -EBUSY;
+ } else {
+ rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ if (rc && rc != -EINVAL) {
+ DP_NOTICE(p_hwfn,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = qed_hw_set_resc_info(p_hwfn);
+ if (rc)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
/* Sanity for ILT */
- if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
+ if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, QED_ILT),
RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1799,8 +2415,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn);
- DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
- "The numbers for each resource are:\n");
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
qed_hw_get_resc_name(res_id),
@@ -1808,6 +2422,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
RESC_START(p_hwfn, res_id));
return 0;
+
+unlock_and_exit:
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ return rc;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1860,9 +2479,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+ break;
default:
DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
@@ -1976,8 +2601,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct qed_dev *cdev = p_hwfn->cdev;
- num_funcs = MAX_NUM_PFS_BB;
+ num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
@@ -1990,12 +2616,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
- if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
- num_funcs = 0;
- eng_mask = 0xaaaa;
+ if (QED_IS_BB(cdev)) {
+ if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
} else {
num_funcs = 1;
- eng_mask = 0x5554;
+ eng_mask = 0xfffe;
}
/* Get the number of the enabled functions on the engine */
@@ -2027,24 +2658,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static int
-qed_get_hw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_pci_personality personality)
+static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
u32 port_mode;
- int rc;
-
- /* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
- rc = qed_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
- }
- /* Read the port mode */
- port_mode = qed_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engines = 1;
@@ -2057,6 +2676,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
/* Default num_ports_in_engines to 1 */
p_hwfn->cdev->num_ports_in_engines = 1;
}
+}
+
+static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 port;
+ int i;
+
+ p_hwfn->cdev->num_ports_in_engines = 0;
+
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ if (port & 1)
+ p_hwfn->cdev->num_ports_in_engines++;
+ }
+
+ if (!p_hwfn->cdev->num_ports_in_engines) {
+ DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
+
+ /* Default num_ports_in_engines to 1 */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ qed_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ int rc;
+
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ qed_hw_info_port_num(p_hwfn, p_ptt);
qed_hw_get_nvm_info(p_hwfn, p_ptt);
@@ -2085,33 +2752,48 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ p_hwfn->hw_info.num_active_tc = 1;
+
qed_get_num_funcs(p_hwfn, p_ptt);
if (qed_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
- return qed_hw_get_resc(p_hwfn);
+ return qed_hw_get_resc(p_hwfn, p_ptt);
}
-static int qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 device_id_mask;
u32 tmp;
/* Read Vendor Id / Device Id */
pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
- cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
+ /* Determine type */
+ device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case QED_DEV_ID_MASK_BB:
+ cdev->type = QED_DEV_TYPE_BB;
+ break;
+ case QED_DEV_ID_MASK_AH:
+ cdev->type = QED_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+ return -EBUSY;
+ }
+
+ cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+
MASK_FIELD(CHIP_REV, cdev->chip_rev);
- cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */
- tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
@@ -2120,15 +2802,17 @@ static int qed_get_dev_info(struct qed_dev *cdev)
cdev->num_hwfns = 1;
}
- cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+ cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
- cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
+ cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
DP_INFO(cdev->hwfns,
- "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ QED_IS_BB(cdev) ? "BB" : "AH",
+ 'A' + cdev->chip_rev,
+ (int)cdev->chip_metal,
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
@@ -2174,7 +2858,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = qed_get_dev_info(p_hwfn->cdev);
+ rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
goto err1;
}
@@ -2195,6 +2879,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
goto err2;
}
+ /* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+ * is called as it sets the ports number in an engine.
+ */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
if (rc)
@@ -2236,11 +2929,14 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
- addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ addr = cdev->regview +
+ qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = addr;
- /* adjust doorbell bar offset for second engine */
- addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ addr = cdev->doorbells +
+ qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */
@@ -3363,3 +4059,22 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
memset(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
+ return QED_IS_BB(cdev) ? 2 : 1;
+}
+
+static int qed_device_num_ports(struct qed_dev *cdev)
+{
+ /* In CMT mode there is always exactly one port */
+ if (cdev->num_hwfns > 1)
+ return 1;
+
+ return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+}
+
+int qed_device_get_port_id(struct qed_dev *cdev)
+{
+ return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
+}
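
Putting the three helpers together with illustrative numbers: a BB device has two engines, so with two ports per engine (an assumption for this example) the device exposes four ports, and a hwfn with abs_pf_id 5 lands on port 5 % 4 = 1; in CMT the port count collapses to one and every PF maps to port 0.

	#include <assert.h>

	int main(void)
	{
		int num_engines = 2;		/* BB */
		int ports_in_engine = 2;	/* example value */
		int num_ports = ports_in_engine * num_engines;

		assert(5 % num_ports == 1);	/* abs_pf_id 5 -> port 1 */
		return 0;
	}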
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 6812003411cd..cefe3ee9064a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/
void qed_resc_setup(struct qed_dev *cdev);
+enum qed_override_force_load {
+ QED_OVERRIDE_FORCE_LOAD_NONE,
+ QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+ QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define QED_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum qed_override_force_load override_force_load;
+};
+
+struct qed_hw_init_params {
+ /* Tunneling parameters */
+ struct qed_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum qed_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports for tx-switching */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct qed_drv_load_params *p_drv_load_params;
+};
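
A minimal sketch of how a caller might fill the two new parameter structs before calling qed_hw_init() (the qed core does this from its start path; the specific values below are illustrative, not the driver's defaults):

	static int example_hw_init(struct qed_dev *cdev, const u8 *fw_data)
	{
		struct qed_drv_load_params drv_load = {
			.is_crash_kernel = false,
			.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT,
			.avoid_eng_reset = false,
			.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE,
		};
		struct qed_hw_init_params params = {
			.p_tunn = NULL,
			.b_hw_start = true,
			.int_mode = QED_INT_MODE_MSIX,	/* assumed enum value */
			.allow_npar_tx_switch = true,
			.bin_fw_data = fw_data,
			.p_drv_load_params = &drv_load,
		};

		return qed_hw_init(cdev, &params);
	}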
+
/**
* @brief qed_hw_init -
*
* @param cdev
- * @param p_tunn
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- * for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- * Pass NULL if not using binary fw file.
+ * @param p_params
*
* @return int
*/
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data);
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
* @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -128,26 +165,20 @@ int qed_hw_stop(struct qed_dev *cdev);
*
* @param cdev
*
+ * @return int
*/
-void qed_hw_stop_fastpath(struct qed_dev *cdev);
+int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
* @brief qed_hw_start_fastpath -restart fastpath traffic,
* only if hw_stop_fastpath was called
*
- * @param cdev
- *
- */
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_hw_reset -
- *
- * @param cdev
+ * @param p_hwfn
*
* @return int
*/
-int qed_hw_reset(struct qed_dev *cdev);
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
/**
* @brief qed_hw_prepare -
@@ -441,4 +472,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 coalesce, u8 qid, u16 sb_id);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cbc81412174f..21a58fffd02b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
- p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);
+ p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -241,7 +241,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
struct fcoe_conn_offload_ramrod_data *p_data;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- u16 pq_id = 0, tmp;
+ u16 physical_q0, tmp;
int rc;
/* Get SPQ entry */
@@ -261,9 +261,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
p_data = &p_ramrod->offload_ramrod_data;
/* Transmission PQ is the first of the PF */
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
- p_conn->physical_q0 = cpu_to_le16(pq_id);
- p_data->physical_q0 = cpu_to_le16(pq_id);
+ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q0);
+ p_data->physical_q0 = cpu_to_le16(physical_q0);
p_data->conn_id = cpu_to_le16(p_conn->conn_id);
DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -340,10 +340,10 @@ qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u32 active_segs = 0;
@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
@@ -753,6 +765,7 @@ static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
static int qed_fcoe_stop(struct qed_dev *cdev)
{
+ struct qed_ptt *p_ptt;
int rc;
if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
@@ -766,10 +779,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
return -EINVAL;
}
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt)
+ return -EAGAIN;
+
/* Stop the fcoe */
- rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
+ rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb663bb..858a57a73589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
+ CORE_RAMROD_RX_QUEUE_FLUSH,
MAX_CORE_RAMROD_CMD_ID
};
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved1[5];
};
union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
-struct core_tx_bd_flags {
- u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags;
+ struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
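
With the flags byte widened into the 16-bit bd_data field (nbds and the RoCE flavor moved in from the old bitfield0), a producer fills it through the usual MASK/SHIFT pairs. A hedged sketch, assuming the SET_FIELD() helper from the common HSI headers:

	static void example_fill_tx_bd(struct core_tx_bd *bd, u8 nbds)
	{
		u16 bd_data = 0;

		SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
		SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, nbds);
		bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	}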
enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+enum dcb_dhcp_update_flag {
+ DONT_UPDATE_DCB_DHCP,
+ UPDATE_DCB,
+ UPDATE_DSCP,
+ UPDATE_DCB_DSCP,
+ MAX_DCB_DHCP_UPDATE_FLAG
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
ETH_EDPM_OUT_OF_SYNC,
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
+ ETH_ANTI_SPOOFING_ERR,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
struct regpair ll2_mac_filter_discard;
struct regpair ll2_conn_disabled_discard;
struct regpair iscsi_irregular_pkt;
- struct regpair reserved;
+ struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
+ struct regpair reserved;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x19d0000,
+ GRCBASE_TGFS = 0x19e0000,
+ GRCBASE_PTLD = 0x19f0000,
+ GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_TGFS,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
__le32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
- __le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ __le16 block_attn_offset;
__le16 reserved;
__le32 sts_val;
__le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
/* Attention register */
struct dbg_attn_reg {
struct dbg_mode_hdr mode;
- __le16 attn_idx_offset;
+ __le16 block_attn_offset;
__le32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
__le32 sts_clr_address;
__le32 mask_address;
};
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
__le16 reserved;
- struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[88]; /* Debug Bus data for each block */
struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
};
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF,
+ DBG_BUS_FILTER_TYPE_PRE,
+ DBG_BUS_FILTER_TYPE_POST,
+ DBG_BUS_FILTER_TYPE_ON,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
/* Debug bus frame modes */
enum dbg_bus_frame_modes {
DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+enum dbg_bus_input_types {
+ DBG_BUS_INPUT_TYPE_STORM,
+ DBG_BUS_INPUT_TYPE_BLOCK,
+ MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD,
+ DBG_BUS_POST_TRIGGER_DROP,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
/* Debug bus target IDs */
enum dbg_bus_targets {
/* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
/* GRC Dump data */
struct dbg_grc_data {
- __le32 param_val[40]; /* Value of each GRC parameter. Array size must
- * match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
- * set by the user (0/1). Array size must
- * match the enum dbg_grc_params.
- */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
+ __le32 param_val[48];
};
/* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
};
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
struct dbg_bus_data bus; /* Debug Bus data */
struct idle_chk_data idle_chk; /* Idle Check data */
u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
*/
u8 chip_id; /* Chip ID (from enum chip_ids) */
u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2404,7 +2502,7 @@ struct fw_info_location {
enum init_modes {
MODE_RESERVED,
- MODE_BB_B0,
+ MODE_BB,
MODE_K2,
MODE_ASIC,
MODE_RESERVED2,
@@ -2418,7 +2516,6 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_40G,
MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -2686,6 +2783,13 @@ struct iro {
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ * default value.
+ *
+ * @param p_hwfn - HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
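A hedged usage sketch: the new revert helper naturally precedes the dump-size query declared just below, so a debug-dump path might look like the following (the surrounding flow and variable setup are assumptions, not part of this patch).

	enum dbg_status rc;
	u32 dump_size_dwords;

	/* drop any user-tuned GRC knobs back to defaults before sizing */
	qed_dbg_grc_set_params_default(p_hwfn);
	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &dump_size_dwords);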
+/**
* @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
* GRC Dump.
*
@@ -3369,6 +3473,11 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 pf_id);
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 pf_id, bool tcp, bool udp,
+ bool ipv4, bool ipv6);
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -3418,7 +3527,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
@@ -3482,7 +3591,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x4cb0, 0x80, 0x0, 0x0, 0x80},
{0x6318, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3630,13 @@ static const struct iro iro_arr[47] = {
{0xd888, 0x38, 0x0, 0x0, 0x24},
{0x12c38, 0x10, 0x0, 0x0, 0x8},
{0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x30, 0x0, 0x0, 0x10},
- {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5000, 0x10, 0x0, 0x0, 0x10},
+ {0x5020, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
@@ -4595,6 +4704,12 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+enum eth_ip_type {
+ ETH_IPV4,
+ ETH_IPV6,
+ MAX_ETH_IP_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -4752,6 +4867,18 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+enum gft_logic_filter_type {
+ GFT_FILTER_TYPE,
+ RFS_FILTER_TYPE,
+ MAX_GFT_LOGIC_FILTER_TYPE
+};
+
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -4822,6 +4949,16 @@ struct rx_udp_filter_data {
__le32 tenant_id;
};
+struct rx_update_gft_filter_data {
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length;
+ __le16 rx_qid_or_action_icid;
+ u8 vport_id;
+ u8 filter_type;
+ u8 filter_action;
+ u8 reserved;
+};
+
/* Ramrod data for rx queue start ramrod */
struct tx_queue_start_ramrod_data {
__le16 sb_id;
@@ -4944,7 +5081,10 @@ struct vport_update_ramrod_data_cmn {
u8 update_mtu_flg;
__le16 mtu;
- u8 reserved[2];
+ u8 update_ctl_frame_checks_en_flg;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
struct vport_update_ramrod_mcast {
@@ -4962,6 +5102,652 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
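Putting the CAM definitions above together, a sketch of composing a mapped CAM line that matches TCP-over-IPv4 traffic for one PF. SET_FIELD follows the same convention as the earlier sketch, the __le32 word is treated as host-order for brevity, and reading the *_MASK_* fields as per-field match enables is an inference from their naming.

static u32 gft_cam_tcp_ipv4(u8 pf_id)
{
	u32 camline = 0;

	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_VALID, 1);
	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
		  GFT_PROFILE_TCP_PROTOCOL);
	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
	/* enable exact matching on the three fields filled in above */
	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 0xF);
	SET_FIELD(camline, GFT_CAM_LINE_MAPPED_PF_ID_MASK, 0xF);
	return camline;
}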
+struct gft_ram_line {
+ __le32 low32bits;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 high32bits;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
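The RAM line selects which packet fields feed the flow match; a sketch of a classic 5-tuple profile (addresses, L4 ports, IP protocol), again treating the __le32 words as host-order and reusing the SET_FIELD convention from the first sketch.

static void gft_ram_line_5tuple(struct gft_ram_line *ram)
{
	SET_FIELD(ram->high32bits, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ram->high32bits, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ram->high32bits, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
	SET_FIELD(ram->low32bits, GFT_RAM_LINE_DST_PORT, 1);
	SET_FIELD(ram->low32bits, GFT_RAM_LINE_SRC_PORT, 1);
}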
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+};
+
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -6165,7 +6951,7 @@ struct ystorm_roce_conn_st_ctx {
};
struct xstorm_roce_conn_st_ctx {
- struct regpair temp[22];
+ struct regpair temp[24];
};
struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +7006,7 @@ struct roce_create_qp_req_ramrod_data {
__le16 mtu;
__le16 pd;
__le16 sq_num_pages;
- __le16 reseved2;
+ __le16 low_latency_phy_queue;
struct regpair sq_pbl_addr;
struct regpair orq_pbl_addr;
__le16 local_mac_addr[3];
@@ -6234,7 +7020,7 @@ struct roce_create_qp_req_ramrod_data {
u8 stats_counter_id;
u8 reserved3[7];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -6282,15 +7068,16 @@ struct roce_create_qp_resp_ramrod_data {
__le32 dst_gid[4];
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
- __le32 reserved2[2];
+ __le16 low_latency_phy_queue;
+ u8 reserved2[6];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +7086,7 @@ struct roce_destroy_qp_req_ramrod_data {
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8213,7 @@ struct ystorm_fcoe_conn_st_ctx {
u8 fcp_rsp_size;
__le16 mss;
struct regpair reserved;
+ __le16 min_frame_size;
u8 protection_info_flags;
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8232,6 @@ struct ystorm_fcoe_conn_st_ctx {
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
- u8 reserved3[2];
};
struct fcoe_vlan_fields {
@@ -8273,10 +9060,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
@@ -8322,10 +9109,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
@@ -8335,8 +9122,8 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
@@ -8440,7 +9227,7 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg10;
__le32 reg11;
__le32 exp_stat_sn;
- __le32 reg13;
+ __le32 ongoing_fast_rxmit_seq;
__le32 reg14;
__le32 reg15;
__le32 reg16;
@@ -8466,10 +9253,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
@@ -8490,10 +9277,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
@@ -8539,7 +9326,7 @@ struct tstorm_iscsi_conn_ag_ctx {
__le32 reg6;
__le32 reg7;
__le32 reg8;
- u8 byte2;
+ u8 cid_offload_cnt;
u8 byte3;
__le16 word0;
};
@@ -8831,11 +9618,24 @@ struct eth_stats {
u64 r511;
u64 r1023;
u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
u64 rfcs;
u64 rxcf;
u64 rxpf;
@@ -8852,14 +9652,36 @@ struct eth_stats {
u64 t511;
u64 t1023;
u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
u64 txpf;
u64 txpp;
- u64 tlpiec;
- u64 tncl;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
u64 rbyte;
u64 rxuca;
u64 rxmca;
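The unions above exist because BB parts keep per-range frame buckets while AH rolls everything past 1518 bytes into a single counter, so a chip-agnostic reader has to collapse the BB buckets itself. A sketch, assuming the caller already knows the chip family:

static u64 rx_frames_1519_to_max(const struct eth_stats *stats, bool is_bb)
{
	if (is_bb)
		return stats->u0.bb0.r1522 + stats->u0.bb0.r2047 +
		       stats->u0.bb0.r4095 + stats->u0.bb0.r9216 +
		       stats->u0.bb0.r16383;

	return stats->u0.ah0.r1519_to_max;
}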
@@ -8943,12 +9765,12 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC (4)
+#define DCBX_TCP_OOO_TC (4)
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
u32 tc_bw_tbl[2];
u32 tc_tsa_tbl[2];
@@ -8957,6 +9779,9 @@ struct dcbx_ets_feature {
#define DCBX_ETS_TSA_ETS 2
};
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
struct dcbx_app_priority_entry {
u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
@@ -9067,6 +9892,10 @@ struct dcb_dscp_map {
struct public_global {
u32 max_path;
u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
u32 debug_mb_offset;
u32 phymod_dbg_mb_offset;
struct couple_mode_teaming cmt;
@@ -9248,9 +10077,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
@@ -9345,6 +10176,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -9362,6 +10194,46 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
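A sketch of how the misc0 word of load_req_stc might be packed for an ordinary OS driver load (default lock timeout, no force, avoid-reset requested); which values a real load path picks is driver policy and is not shown in this hunk.

static void load_req_fill_misc0(struct load_req_stc *req)
{
	req->misc0 = DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT;
	req->misc0 |= LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT;
	req->misc0 |= LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT;
	req->misc0 |= LOAD_REQ_FLAGS0_AVOID_RESET << LOAD_REQ_FLAGS0_SHIFT;
}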
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -9393,6 +10265,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -9405,12 +10278,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
@@ -9436,6 +10311,33 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
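These defines imply a request/grant protocol over the driver mailbox: the request param packs a resource id plus opcode, and the response param is decoded with the RSP masks. A sketch using qed_mcp_cmd(), the driver's existing mailbox helper; its use here and the error policy are assumptions, not taken from this patch.

static int resource_lock_request(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 param = 0, mcp_resp = 0, mcp_param = 0;
	u8 opcode;
	int rc;

	param |= RESOURCE_DUMP << RESOURCE_CMD_REQ_RESC_SHIFT;
	param |= RESOURCE_OPCODE_REQ << RESOURCE_CMD_REQ_OPCODE_SHIFT;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	opcode = (mcp_param & RESOURCE_CMD_RSP_OPCODE_MASK) >>
		 RESOURCE_CMD_RSP_OPCODE_SHIFT;

	return (opcode == RESOURCE_OPCODE_GNT) ? 0 : -EBUSY;
}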
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -9524,12 +10426,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -9549,6 +10455,10 @@ struct public_drv_mb {
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
@@ -9659,6 +10569,8 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+
u32 e_lane_cfg1;
u32 e_lane_cfg2;
u32 f_lane_cfg1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 899cad7f97ea..a05feb38c6ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -58,6 +58,7 @@ struct qed_ptt {
struct list_head list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
};
struct qed_ptt_pool {
@@ -79,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
if (i >= RESERVED_PTT_MAX)
list_add(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
@@ -193,6 +195,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
offset = hw_addr - win_hw_addr;
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -800,55 +807,3 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto, union qed_qm_pq_params *p_params)
-{
- u16 pq_id = 0;
-
- if ((proto == PROTOCOLID_CORE ||
- proto == PROTOCOLID_ETH ||
- proto == PROTOCOLID_ISCSI ||
- proto == PROTOCOLID_ROCE) && !p_params) {
- DP_NOTICE(p_hwfn,
- "Protocol %d received NULL PQ params\n", proto);
- return 0;
- }
-
- switch (proto) {
- case PROTOCOLID_CORE:
- if (p_params->core.tc == LB_TC)
- pq_id = p_hwfn->qm_info.pure_lb_pq;
- else if (p_params->core.tc == OOO_LB_TC)
- pq_id = p_hwfn->qm_info.ooo_pq;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_ETH:
- pq_id = p_params->eth.tc;
- if (p_params->eth.is_vf)
- pq_id += p_hwfn->qm_info.vf_queues_offset +
- p_params->eth.vf_id;
- break;
- case PROTOCOLID_ISCSI:
- if (p_params->iscsi.q_idx == 1)
- pq_id = p_hwfn->qm_info.pure_ack_pq;
- break;
- case PROTOCOLID_ROCE:
- if (p_params->roce.dcqcn)
- pq_id = p_params->roce.qpid;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- if (pq_id > p_hwfn->qm_info.num_pf_rls)
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_FCOE:
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- default:
- pq_id = 0;
- }
-
- pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
-
- return pq_id;
-}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 9277264d2e65..f2505c691c26 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -297,9 +297,6 @@ union qed_qm_pq_params {
} roce;
};
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto, union qed_qm_pq_params *params);
-
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a6852695..67200c5498ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands with
- * the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
- if (is_bb_a0)
- cmdq_lines = min_t(u32, cmdq_lines, 1022);
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
u16 i, pq_id, pq_group;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
bool is_vf_pq = (i >= p_params->num_pf_pqs);
struct qm_rf_pq_map tx_pq_map;
+ bool rl_valid = p_params->pq_params[i].rl_valid &&
+ (p_params->pq_params[i].vport_id <
+ MAX_QM_GLOBAL_RLS);
+
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
+
+ if (p_params->pq_params[i].rl_valid && !rl_valid)
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration\n");
/* fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- p_params->pq_params[i].rl_valid ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg,
+ QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- p_params->pq_params[i].rl_valid ?
+ rl_valid ?
p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
/* if PQ is associated with a VF, add indication
* to PQ VF mask
*/
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id /
+ QM_PF_QUEUE_GROUP_SIZE] |=
+ BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
if (p_params->pf_id < MAX_NUM_PFS_BB)
crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
- (p_params->pf_id % MAX_NUM_PFS_BB);
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+ crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
QM_WFQ_CRD_REG_SIGN_BIT);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
return 0;
}
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
{
u8 i, vport_id;
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
/* EDPM with geneve tunnel not supported in BB_B0 */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
@@ -955,3 +961,132 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
ip_geneve_enable ? 1 : 0);
}
+
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 pf_id)
+{
+ union gft_cam_line_union camline;
+ struct gft_ram_line ramline;
+ u32 *p_ramline, i;
+
+ p_ramline = (u32 *)&ramline;
+
+ /* Stop using GFT logic */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+ memset(&camline, 0, sizeof(union gft_cam_line_union));
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camline.cam_line_mapped.camline);
+ memset(&ramline, 0, sizeof(ramline));
+
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
+ u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
+
+ hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
+
+ qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
+ }
+}
+
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 pf_id, bool tcp, bool udp,
+ bool ipv4, bool ipv6)
+{
+ u32 rfs_cm_hdr_event_id, *p_ramline;
+ union gft_cam_line_union camline;
+ struct gft_ram_line ramline;
+ int i;
+
+ rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ p_ramline = (u32 *)&ramline;
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn,
+ "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn,
+ "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
+ /* Configure Registers for RFS mode */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+ camline.cam_line_mapped.camline = 0;
+
+ /* Mark the CAM line valid */
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF */
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ if (!(tcp && udp)) {
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ if (tcp)
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+
+ /* write characteristics to cam */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camline.cam_line_mapped.camline);
+ camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
+ PRS_REG_GFT_CAM +
+ CAM_LINE_SIZE * pf_id);
+
+ /* write line to RAM - compare to filter 4 tuple */
+ ramline.low32bits = 0;
+ ramline.high32bits = 0;
+ SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(p_ramline + i));
+
+ /* set default profile so that no filter match will happen */
+ ramline.low32bits = 0xffff;
+ ramline.high32bits = 0xffff;
+
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
+ *(p_ramline + i));
+}
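
qed_set_rfs_mode_enable() above programs the GFT CAM and RAM lines purely through SET_FIELD()-style mask/shift macros. The following self-contained sketch shows that packing idiom with invented field masks; the real masks and shifts live in the qed firmware headers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: bit 0 = valid, bits 1..4 = PF id */
#define FIELD_VALID_MASK	0x1
#define FIELD_VALID_SHIFT	0
#define FIELD_PF_ID_MASK	0xf
#define FIELD_PF_ID_SHIFT	1

/* OR the masked value into place, mirroring the driver's SET_FIELD() */
#define SET_FIELD_SKETCH(reg, name, val) \
	((reg) |= (((uint32_t)(val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint32_t camline = 0;

	SET_FIELD_SKETCH(camline, FIELD_VALID, 1);	/* mark line valid */
	SET_FIELD_SKETCH(camline, FIELD_PF_ID, 5);	/* bind to PF 5 */
	printf("camline = 0x%08x\n", camline);		/* prints 0x0000000b */
	return 0;
}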
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e0d4dc..4a2e7be5bf72 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
}
/* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+ buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
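
The one-line change above moves where the bin_buffer_hdr table is read from within the firmware image. As a rough, self-contained illustration of the offset-table pattern it relies on, with structure and section names invented for the example:

#include <stdint.h>
#include <string.h>

struct blob_hdr {
	uint32_t offset;	/* byte offset of the section from blob start */
	uint32_t length;
};

enum blob_section { SEC_FW_VER_INFO = 0, SEC_INIT_CMD, SEC_MAX };

static const uint8_t *blob_section(const uint8_t *data, enum blob_section id)
{
	struct blob_hdr hdr;

	/* memcpy sidesteps alignment concerns on the header table */
	memcpy(&hdr, data + id * sizeof(hdr), sizeof(hdr));
	return data + hdr.offset;
}

int main(void)
{
	uint8_t blob[24] = { 0 };
	struct blob_hdr ver = { .offset = 16, .length = 4 };

	memcpy(blob, &ver, sizeof(ver));	/* header 0: version section */
	blob[16] = 0x2a;

	return blob_section(blob, SEC_FW_VER_INFO)[0] == 0x2a ? 0 : 1;
}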
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 84310b60849b..0ed24d6e6c65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2500,8 +2500,9 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
u8 timeset, timer_res;
- u8 num_tc = 1, i;
+ u8 i;
/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 098766f7fe88..339c91dfa658 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -181,6 +181,15 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_params = &p_hwfn->pf_params.iscsi_pf_params;
p_queue = &p_init->q_params;
+ /* Sanity */
+ if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+ p_params->num_queues,
+ p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+ return -EINVAL;
+ }
+
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
@@ -216,7 +225,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
- p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -270,11 +279,10 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
struct tcp_offload_params *p_tcp = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params pq_params;
- u16 pq0_id = 0, pq1_id = 0;
dma_addr_t r2tq_pbl_addr;
dma_addr_t xhq_pbl_addr;
dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
int rc = 0;
u32 dval;
u16 wval;
@@ -297,16 +305,14 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
/* Transmission PQ is the first of the PF */
- memset(&pq_params, 0, sizeof(pq_params));
- pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
- p_conn->physical_q0 = cpu_to_le16(pq0_id);
- p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
/* iSCSI Pure-ACK PQ */
- pq_params.iscsi.q_idx = 1;
- pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
- p_conn->physical_q1 = cpu_to_le16(pq1_id);
- p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
@@ -593,21 +599,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
- bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
- bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
@@ -857,6 +873,8 @@ static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
p_stats->iscsi_rx_packet_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+ p_stats->iscsi_rx_new_ooo_isle_events_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
p_stats->iscsi_cmdq_threshold_cnt =
le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
p_stats->iscsi_rq_threshold_cnt =
@@ -1003,6 +1021,8 @@ static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
info->secondary_bdq_rq_addr =
qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
+
return rc;
}
@@ -1304,6 +1324,26 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats)
+{
+ struct qed_iscsi_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+ if (qed_iscsi_stats(cdev, &proto_stats)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect ISCSI statistics\n");
+ return;
+ }
+
+ /* Translate FW statistics into struct */
+ stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
+ stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
+ stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
+ stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
+}
+
static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.common = &qed_common_ops_pass,
.ll2 = &qed_ll2_ops_pass,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index 20c187f4ed0b..ae98f772cbc0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -64,13 +64,25 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
+
+/**
+ * @brief - Fills the provided struct with iSCSI protocol statistics.
+ *
+ * @param cdev
+ * @param stats - points to struct that will be filled with statistics.
+ */
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline struct qed_iscsi_info *qed_iscsi_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
+ struct qed_iscsi_info *p_iscsi_info) {}
+static inline void
+qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif
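
The #else branch above keeps callers building unchanged when CONFIG_QED_ISCSI is compiled out by supplying static inline no-op stubs. A standalone sketch of the same config-gated stub pattern, using an invented feature switch:

#include <stdio.h>

struct demo_stats { unsigned long rx_pdus, tx_pdus; };

#ifdef CONFIG_DEMO_FEATURE
void demo_get_stats(struct demo_stats *stats)
{
	stats->rx_pdus = 1;	/* real collection would go here */
	stats->tx_pdus = 2;
}
#else
/* Feature compiled out: same signature, zero cost, no #ifdef at call sites */
static inline void demo_get_stats(struct demo_stats *stats)
{
	(void)stats;
}
#endif

int main(void)
{
	struct demo_stats s = { 0, 0 };

	demo_get_stats(&s);
	printf("rx=%lu tx=%lu\n", s.rx_pdus, s.tx_pdus);
	return 0;
}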
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index df932be5a4e5..746fed4099c8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -938,15 +938,12 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
- union qed_qm_pq_params pq_params;
int rc;
- memset(&pq_params, 0, sizeof(pq_params));
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
- qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
- &pq_params));
+ qed_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc)
return rc;
@@ -1470,13 +1467,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1502,10 +1506,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1539,12 +1543,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1578,23 +1585,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats)
{
+ struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1605,54 +1615,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets +=
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
@@ -1768,6 +1799,84 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
+static void
+qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp, p_cfg_params->udp,
+ p_cfg_params->ipv4, p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+static int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length, u16 qid,
+ u8 vport_id, bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc)
+ return rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ }
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(length);
+ p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -1898,7 +2007,11 @@ static int qed_start_vport(struct qed_dev *cdev,
return rc;
}
- qed_hw_start_fastpath(p_hwfn);
+ rc = qed_hw_start_fastpath(p_hwfn);
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT fastpath\n");
+ return rc;
+ }
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
@@ -2141,7 +2254,13 @@ static int qed_start_txq(struct qed_dev *cdev,
#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
- qed_hw_stop_fastpath(cdev);
+ int rc;
+
+ rc = qed_hw_stop_fastpath(cdev);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop Fastpath\n");
+ return rc;
+ }
return 0;
}
@@ -2166,31 +2285,46 @@ static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_params *tunn_params)
{
- struct qed_tunn_update_params tunn_info;
+ struct qed_tunnel_info tunn_info;
int i, rc;
- if (IS_VF(cdev))
- return 0;
-
memset(&tunn_info, 0, sizeof(tunn_info));
- if (tunn_params->update_vxlan_port == 1) {
- tunn_info.update_vxlan_udp_port = 1;
- tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ if (tunn_params->update_vxlan_port) {
+ tunn_info.vxlan_port.b_update_port = true;
+ tunn_info.vxlan_port.port = tunn_params->vxlan_port;
}
- if (tunn_params->update_geneve_port == 1) {
- tunn_info.update_geneve_udp_port = 1;
- tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ if (tunn_params->update_geneve_port) {
+ tunn_info.geneve_port.b_update_port = true;
+ tunn_info.geneve_port.port = tunn_params->geneve_port;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_tunnel_info *tun;
+
+ tun = &hwfn->cdev->tunnel;
rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
-
if (rc)
return rc;
+
+ if (IS_PF_SRIOV(hwfn)) {
+ u16 vxlan_port, geneve_port;
+ int j;
+
+ vxlan_port = tun->vxlan_port.port;
+ geneve_port = tun->geneve_port.port;
+
+ qed_for_each_vf(hwfn, j) {
+ qed_iov_bulletin_set_udp_ports(hwfn, j,
+ vxlan_port,
+ geneve_port);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
}
return 0;
@@ -2315,6 +2449,59 @@ static int qed_configure_filter(struct qed_dev *cdev,
}
}
+static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_arfs_config_params arfs_config_params;
+
+ memset(&arfs_config_params, 0, sizeof(arfs_config_params));
+ arfs_config_params.tcp = true;
+ arfs_config_params.udp = true;
+ arfs_config_params.ipv4 = true;
+ arfs_config_params.ipv6 = true;
+ arfs_config_params.arfs_enable = en_searcher;
+
+ qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &arfs_config_params);
+ return 0;
+}
+
+static void
+qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
+ void *cookie, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
+ void *dev = p_hwfn->cdev->ops_cookie;
+
+ op->arfs_filter_op(dev, cookie, fw_return_code);
+}
+
+static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
+ dma_addr_t mapping, u16 length,
+ u16 vport_id, u16 rx_queue_id,
+ bool add_filter)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_spq_comp_cb cb;
+ int rc = -EINVAL;
+
+ cb.function = qed_arfs_sp_response_handler;
+ cb.cookie = cookie;
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
+ &cb, mapping, length, rx_queue_id,
+ vport_id, add_filter);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to issue a-RFS filter configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Successfully issued a-RFS filter configuration\n");
+
+ return rc;
+}
+
static int qed_fp_cqe_completion(struct qed_dev *dev,
u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
@@ -2356,6 +2543,8 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
.tunn_config = &qed_tunn_configure,
+ .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
+ .configure_arfs_searcher = &qed_configure_arfs_searcher,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
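
The two entries added to qed_eth_ops_pass above hand the aRFS hooks to the upper driver through a const table of function pointers. A minimal sketch of that ops-table pattern, with all names illustrative:

#include <stdio.h>

struct demo_eth_ops {
	int (*configure_searcher)(void *dev, int enable);
	int (*filter_config)(void *dev, void *cookie, int add);
};

static int demo_searcher(void *dev, int enable)
{
	(void)dev;
	printf("searcher %s\n", enable ? "enabled" : "disabled");
	return 0;
}

static int demo_filter(void *dev, void *cookie, int add)
{
	(void)dev;
	(void)cookie;
	printf("%s filter\n", add ? "adding" : "removing");
	return 0;
}

/* The lower driver exports one const table; the upper driver calls through it */
static const struct demo_eth_ops demo_ops = {
	.configure_searcher = demo_searcher,
	.filter_config = demo_filter,
};

int main(void)
{
	demo_ops.configure_searcher(NULL, 1);
	return demo_ops.filter_config(NULL, NULL, 1);
}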
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index e763abd334f6..6f44229899eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -185,6 +185,14 @@ struct qed_filter_accept_flags {
#define QED_ACCEPT_BCAST 0x20
};
+struct qed_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable;
+};
+
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0d3cef409c96..09c86411918c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -597,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
return bd_flags;
}
@@ -758,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
@@ -1090,7 +1090,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params pq_params;
u16 pq_id = 0, pbl_size;
int rc = -EINVAL;
@@ -1127,9 +1126,18 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = p_ll2_conn->conn.tx_tc;
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ switch (p_ll2_conn->conn.tx_tc) {
+ case LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ break;
+ case OOO_LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+ break;
+ default:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ break;
+ }
+
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
switch (conn_type) {
@@ -1400,13 +1408,21 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
+ struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
u8 qid;
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
- if (!p_ll2_conn)
- return -EINVAL;
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
p_rx = &p_ll2_conn->rx_queue;
p_tx = &p_ll2_conn->tx_queue;
@@ -1439,7 +1455,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx->cur_completing_frag_num = 0;
*p_tx->p_fw_cons = 0;
- qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ if (rc)
+ goto out;
qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
p_ll2_conn->queue_id = qid;
@@ -1453,26 +1471,28 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
- qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@@ -1591,33 +1611,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
p_tx->cur_send_frag_num++;
}
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2,
- struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- enum core_tx_dest tx_dest,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum core_roce_flavor_type type,
- dma_addr_t first_frag,
- u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
- u16 frag_idx;
+ u16 bd_data = 0, frag_idx;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
- start_bd->bd_flags.as_bitfield = bd_flags;
- start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
- CORE_TX_BD_FLAGS_START_BD_SHIFT;
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+ bd_data |= bd_flags;
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
@@ -1642,9 +1663,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bd_data.as_bitfield = 0;
(*p_bd)->bitfield1 = 0;
- (*p_bd)->bitfield0 = 0;
p_curp->bds_set[frag_idx].tx_frag = 0;
p_curp->bds_set[frag_idx].frag_len = 0;
}
@@ -1823,23 +1843,30 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL;
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
- if (!p_ll2_conn)
- return -EINVAL;
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
qed_ll2_txq_flush(p_hwfn, connection_handle);
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
@@ -1847,14 +1874,16 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
- qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@@ -2241,11 +2270,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
/* Request HW to calculate IP csum */
if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
if (skb_vlan_tag_present(skb)) {
vlan = skb_vlan_tag_get(skb);
- flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
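
Several LL2 paths above stop borrowing p_main_ptt and instead acquire a PTT locally, funnelling every failure through a single out: label so the window is always released. A self-contained sketch of that acquire/goto-out/release shape; every name here is invented:

#include <stdio.h>
#include <errno.h>

struct ptt { int id; };	/* stand-in for a HW access-window token */

static struct ptt *ptt_acquire(void)
{
	static struct ptt token = { 1 };

	return &token;	/* a real pool could return NULL when exhausted */
}

static void ptt_release(struct ptt *p)
{
	(void)p;
}

static int do_step(int step)
{
	return step == 2 ? -EINVAL : 0;	/* fail the second step on purpose */
}

static int establish(void)
{
	struct ptt *ptt = ptt_acquire();
	int rc;

	if (!ptt)
		return -EAGAIN;

	rc = do_step(1);
	if (rc)
		goto out;

	rc = do_step(2);
	if (rc)
		goto out;
out:
	ptt_release(ptt);	/* single release point for every exit path */
	return rc;
}

int main(void)
{
	printf("establish() = %d\n", establish());
	return 0;
}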
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index eef30a598b40..59992cf20d42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -45,6 +45,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -54,6 +55,8 @@
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
@@ -227,10 +230,25 @@ err0:
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
+ struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt;
memset(dev_info, 0, sizeof(struct qed_dev_info));
+ if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = cdev->num_hwfns;
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
@@ -238,6 +256,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+ dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) {
@@ -588,6 +607,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 id = p_hwfn->my_id;
+ u32 int_mode;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX)
+ synchronize_irq(cdev->int_params.msix_table[id].vector);
+ else
+ synchronize_irq(cdev->pdev->irq);
+}
+
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
@@ -630,19 +662,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc;
}
-static int qed_nic_reset(struct qed_dev *cdev)
-{
- int rc;
-
- rc = qed_hw_reset(cdev);
- if (rc)
- return rc;
-
- qed_resc_free(cdev);
-
- return 0;
-}
-
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc, i;
@@ -743,7 +762,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
- if (!IS_ENABLED(CONFIG_QED_RDMA))
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
return 0;
for_each_hwfn(cdev, i)
@@ -875,10 +895,12 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
+ if (cdev->num_hwfns > 1 || IS_VF(cdev))
+ params->eth_pf_params.num_arfs_filters = 0;
+
/* In case we might support RDMA, don't allow qede to be greedy
* with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
*/
@@ -900,11 +922,15 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
- struct qed_tunn_start_params tunn_info;
+ struct qed_drv_load_params drv_load_params;
+ struct qed_hw_init_params hw_init_params;
struct qed_mcp_drv_version drv_version;
+ struct qed_tunnel_info tunn_info;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
+#ifdef CONFIG_RFS_ACCEL
struct qed_ptt *p_ptt;
+#endif
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -920,13 +946,18 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err;
}
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (p_ptt) {
- QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
- } else {
- DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
- goto err;
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1) {
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to acquire PTT for aRFS\n");
+ goto err;
+ }
}
+#endif
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -953,27 +984,47 @@ static int qed_slowpath_start(struct qed_dev *cdev,
qed_dbg_pf_init(cdev);
}
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN |
- 1 << QED_MODE_IPGENEVE_TUNN;
-
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-
/* Start the slowpath */
- rc = qed_hw_init(cdev, &tunn_info, true,
- cdev->int_params.out.int_mode,
- true, data);
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.vxlan.b_mode_enabled = true;
+ tunn_info.l2_gre.b_mode_enabled = true;
+ tunn_info.ip_gre.b_mode_enabled = true;
+ tunn_info.l2_geneve.b_mode_enabled = true;
+ tunn_info.ip_geneve.b_mode_enabled = true;
+ tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ hw_init_params.p_tunn = &tunn_info;
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = cdev->int_params.out.int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.is_crash_kernel = is_kdump_kernel();
+ drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = qed_hw_init(cdev, &hw_init_params);
if (rc)
goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ if (IS_PF(cdev)) {
+ cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+ BIT(QED_MODE_L2GENEVE_TUNN) |
+ BIT(QED_MODE_IPGENEVE_TUNN) |
+ BIT(QED_MODE_L2GRE_TUNN) |
+ BIT(QED_MODE_IPGRE_TUNN));
+ }
+
/* Allocate LL2 interface if needed */
if (QED_LEADING_HWFN(cdev)->using_ll2) {
rc = qed_ll2_alloc_if(cdev);
@@ -1014,9 +1065,12 @@ err:
if (IS_PF(cdev))
release_firmware(cdev->firmware);
- if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+#ifdef CONFIG_RFS_ACCEL
+ if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_iov_wq_stop(cdev, false);
@@ -1031,8 +1085,11 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
- qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
@@ -1042,7 +1099,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
}
qed_disable_msix(cdev);
- qed_nic_reset(cdev);
+
+ qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true);
@@ -1653,13 +1711,18 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
- stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
break;
+ case QED_MCP_ISCSI_STATS:
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+ break;
default:
DP_ERR(cdev, "Invalid protocol type = %d\n", type);
return;
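
The new QED_MCP_ISCSI_STATS branch above mirrors the FCoE one: collect the full firmware statistics struct, then translate only the subset the management firmware consumes. A standalone sketch of that collect-then-translate shape, with invented structs and values:

#include <stdio.h>
#include <string.h>

struct fw_stats { unsigned long rx_pdus, tx_pdus, rx_bytes, tx_bytes; };
struct mcp_stats { unsigned long rx_pdus, tx_pdus, rx_bytes, tx_bytes; };

static int collect_fw_stats(struct fw_stats *s)
{
	memset(s, 0, sizeof(*s));
	s->rx_pdus = 10;	/* stand-ins for a HW/firmware readout */
	s->tx_pdus = 12;
	s->rx_bytes = 1500;
	s->tx_bytes = 1800;
	return 0;
}

static void get_protocol_stats(struct mcp_stats *out)
{
	struct fw_stats fw;

	if (collect_fw_stats(&fw))
		return;	/* leave 'out' untouched on collection failure */

	out->rx_pdus = fw.rx_pdus;	/* translate the subset MCP wants */
	out->tx_pdus = fw.tx_pdus;
	out->rx_bytes = fw.rx_bytes;
	out->tx_bytes = fw.tx_bytes;
}

int main(void)
{
	struct mcp_stats s = { 0 };

	get_protocol_stats(&s);
	printf("rx_pdus=%lu tx_bytes=%lu\n", s.rx_pdus, s.tx_bytes);
	return 0;
}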
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 87fde205149f..7266b36a2655 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
}
+struct qed_mcp_cmd_elem {
+ struct list_head list;
+ struct qed_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+ if (!p_cmd_elem)
+ goto out;
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+ list_del(&p_cmd_elem->list);
+ kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+ u16 seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return NULL;
+}
+
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ list_for_each_entry_safe(p_cmd_elem,
+ p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list) {
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
}
+
kfree(p_hwfn->mcp_info);
return 0;
@@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return 0;
}
@@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
goto err;
p_info = p_hwfn->mcp_info;
+ /* Initialize the MFW spinlock */
+ spin_lock_init(&p_info->cmd_lock);
+ spin_lock_init(&p_info->link_lock);
+
+ INIT_LIST_HEAD(&p_info->cmd_list);
+
if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
DP_NOTICE(p_hwfn, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- spin_lock_init(&p_info->lock);
- spin_lock_init(&p_info->link_lock);
-
return 0;
err:
@@ -201,68 +262,39 @@ err:
return -ENOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- spin_lock_bh(&p_hwfn->mcp_info->lock);
+ u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
*/
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
-
- if (p_hwfn->mcp_info->block_mb_sending) {
- DP_NOTICE(p_hwfn,
- "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
- return -EBUSY;
- }
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u8 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != 0)
- return rc;
+ /* Ensure that only a single thread is accessing the mailbox */
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
- (DRV_MSG_CODE_MCP_RESET | seq));
+
+ /* Set drv command along with the updated sequence */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
/* Wait for MFW response */
@@ -281,72 +313,207 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
rc = -EAGAIN;
}
- qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
- u8 delay = CHIP_MCP_RESP_ITER_US;
- u32 seq, cnt = 1, actual_mb_seq;
- int rc = 0;
-
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
+ /* There is at most one pending command at any given time, and if one
+ * exists, it is placed at the head of the list.
*/
- if (p_hwfn->mcp_info->mcp_hist !=
- qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
- qed_load_mcp_offsets(p_hwfn, p_ptt);
- qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+ struct qed_mcp_cmd_elem, list);
+ return !p_cmd_elem->b_is_completed;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ return false;
+}
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params *p_mb_params;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new non-handled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return -EAGAIN;
+
+ p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return -EINVAL;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb,
+ union_data);
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return 0;
+}
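
qed_mcp_update_pending_cmd() keys everything off the sequence number the MFW echoes back in the low bits of fw_mb_header; on a mismatch it returns -EAGAIN and the response is left for a later poll. A minimal user-space sketch of that match (the mask value below is only a stand-in for FW_MSG_SEQ_NUMBER_MASK from qed_hsi.h):

#include <stdint.h>
#include <stdio.h>

#define SEQ_NUMBER_MASK 0x000fU	/* stand-in for FW_MSG_SEQ_NUMBER_MASK */

/* Returns 1 when the response in fw_mb_header answers cur_seq. */
static int response_matches(uint32_t fw_mb_header, uint16_t cur_seq)
{
	uint16_t seq_num = (uint16_t)(fw_mb_header & SEQ_NUMBER_MASK);

	return seq_num == (cur_seq & SEQ_NUMBER_MASK);
}

int main(void)
{
	printf("%d\n", response_matches(0x12340003, 3));	/* 1: a match */
	printf("%d\n", response_matches(0x12340002, 3));	/* 0: stale */
	return 0;
}

Only on a match does the driver copy out fw_mb_param and the union data, so responses to earlier commands are never mistaken for the current one.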
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+ memset(&union_data, 0, sizeof(union_data));
+ if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ memcpy(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "wrote command (%x) to MFW MB param 0x%08x\n",
- (cmd | seq), param);
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
+ int rc = 0;
+
+ /* Wait until the mailbox is non-occupied */
do {
- /* Wait for MFW response */
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (!qed_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
udelay(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ } while (++cnt < max_retries);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < QED_DRV_MB_MAX_RETRIES));
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EAGAIN;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "[after %d ms] read (%x) seq is (%x) from FW MB\n",
- cnt * delay, *o_mcp_resp, seq);
-
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = -EAGAIN;
+ /* Send the mailbox command */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ /* Wait for the MFW response */
+ do {
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
+ udelay(delay);
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (p_cmd_elem->b_is_completed)
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ return -EAGAIN;
}
+
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp,
+ p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return 0;
+
+err:
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
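
Both wait loops in _qed_mcp_cmd_and_union() follow the same shape: take cmd_lock, test for completion, and drop the lock around the udelay() so other contexts can post or complete commands in the meantime. A user-space sketch of that shape (a pthread mutex stands in for the BH spinlock; all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct mbox {
	pthread_mutex_t lock;	/* stands in for mcp_info->cmd_lock */
	bool pending;		/* stands in for an uncompleted list head */
};

/* Returns 0 with the lock held once the mailbox is free; -1 on timeout. */
static int wait_for_mbox(struct mbox *mb, unsigned int max_retries,
			 unsigned int delay_us)
{
	unsigned int cnt = 0;

	do {
		pthread_mutex_lock(&mb->lock);
		if (!mb->pending)
			return 0;	/* caller sends while still locked */
		pthread_mutex_unlock(&mb->lock);
		usleep(delay_us);	/* poll with the lock dropped */
	} while (++cnt < max_retries);

	return -1;			/* mailbox occupied, like -EAGAIN */
}

int main(void)
{
	struct mbox mb = { PTHREAD_MUTEX_INITIALIZER, false };

	if (wait_for_mbox(&mb, 10, 100) == 0)
		pthread_mutex_unlock(&mb.lock);	/* send would happen here */
	return 0;
}

In the driver each iteration also calls qed_mcp_update_pending_cmd(), so a response that arrives while this command waits is consumed rather than lost.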
@@ -354,9 +521,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_mb_params *p_mb_params)
{
- u32 union_data_addr;
-
- int rc;
+ size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -364,33 +531,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- offsetof(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc)
- return rc;
-
- if (p_mb_params->p_data_src != NULL)
- qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
-
- rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != NULL)
- qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
-
- qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size,
+ p_mb_params->data_dst_size, union_data_size);
+ return -EINVAL;
+ }
- return rc;
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -401,32 +552,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data data_src;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
- memset(&data_src, 0, sizeof(data_src));
mb_params.cmd = cmd;
mb_params.param = param;
- /* In case of UNLOAD_DONE, set the primary MAC */
- if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
- (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
- u8 *p_mac = p_hwfn->cdev->wol_mac;
-
- data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
- data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
- p_mac[4] << 8 | p_mac[5];
-
- DP_VERBOSE(p_hwfn,
- (QED_MSG_SP | NETIF_MSG_IFDOWN),
- "Setting WoL MAC: %pM --> [%08x,%08x]\n",
- p_mac, data_src.wol_mac.mac_upper,
- data_src.wol_mac.mac_lower);
-
- mb_params.p_data_src = &data_src;
- }
-
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
@@ -445,13 +576,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
int rc;
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
@@ -460,55 +595,413 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+ memcpy(o_buf, raw_data, *o_txn_size);
return 0;
}
-int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *p_load_code)
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+ u8 exist_drv_role,
+ enum qed_override_force_load override_force_load)
+{
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case QED_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
+}
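
Unless overridden, the force-load policy reduces to a two-row table. A small runnable model of the default branch (the two override cases simply return a fixed answer):

#include <stdbool.h>
#include <stdio.h>

enum role { ROLE_OS, ROLE_PREBOOT, ROLE_KDUMP };

/* Default policy from qed_mcp_can_force_load(): an OS driver may bump a
 * preboot driver, and a kdump kernel may bump an OS driver. */
static bool can_force_load(enum role loading, enum role existing)
{
	return (loading == ROLE_OS && existing == ROLE_PREBOOT) ||
	       (loading == ROLE_KDUMP && existing == ROLE_OS);
}

int main(void)
{
	printf("OS over preboot: %d\n", can_force_load(ROLE_OS, ROLE_PREBOOT));
	printf("kdump over OS:   %d\n", can_force_load(ROLE_KDUMP, ROLE_OS));
	printf("OS over OS:      %d\n", can_force_load(ROLE_OS, ROLE_OS));
	return 0;
}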
+
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_QEDE_BITMAP_IDX BIT(0)
+#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1)
+#define CONFIG_QEDR_BITMAP_IDX BIT(2)
+#define CONFIG_QEDF_BITMAP_IDX BIT(4)
+#define CONFIG_QEDI_BITMAP_IDX BIT(5)
+#define CONFIG_QED_LL2_BITMAP_IDX BIT(6)
+
+static u32 qed_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+ if (IS_ENABLED(CONFIG_QEDE))
+ config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_SRIOV))
+ config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_RDMA))
+ config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_FCOE))
+ config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_ISCSI))
+ config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_LL2))
+ config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+
+ return config_bitmap;
+}
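
The bitmap travels in drv_ver_1 of the load request (see qed_mcp_load_req() below), letting the MFW record which qed sub-modules the loading driver was built with; note that bit 3 is left unused by this scheme. A compact decoder with the same bit positions (the example bitmap value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Same bit positions as the CONFIG_*_BITMAP_IDX defines above. */
static const struct { uint32_t bit; const char *name; } cfg_bits[] = {
	{ BIT(0), "qede" }, { BIT(1), "sriov" }, { BIT(2), "qedr" },
	{ BIT(4), "qedf" }, { BIT(5), "qedi" }, { BIT(6), "ll2" },
};

int main(void)
{
	uint32_t bitmap = BIT(0) | BIT(2) | BIT(6);	/* example value */
	size_t i;

	for (i = 0; i < sizeof(cfg_bits) / sizeof(cfg_bits[0]); i++)
		if (bitmap & cfg_bits[i].bit)
			printf("%s enabled\n", cfg_bits[i].name);
	return 0;
}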
+
+struct qed_load_req_in_params {
+ u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT 0
+#define QED_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_in_params *p_in_params,
+ struct qed_load_req_out_params *p_out_params)
{
- struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
int rc;
+ memset(&load_req, 0, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
memset(&mb_params, 0, sizeof(mb_params));
- /* Load Request */
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
- mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- cdev->drv_type;
- memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
- mb_params.p_data_src = &union_data;
- rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
- /* if mcp fails to respond we must abort */
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0,
+ load_req.drv_ver_1,
+ load_req.fw_ver,
+ load_req.misc0,
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ QED_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+ }
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
return rc;
}
- *p_load_code = mb_params.mcp_resp;
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0,
+ load_rsp.drv_ver_1,
+ load_rsp.fw_ver,
+ load_rsp.misc0,
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return 0;
+}
+
+static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+ enum qed_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case QED_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case QED_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_load_req_force {
+ QED_LOAD_REQ_FORCE_NONE,
+ QED_LOAD_REQ_FORCE_PF,
+ QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
+                                  enum qed_load_req_force force_cmd,
+                                  u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case QED_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case QED_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case QED_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ }
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_params *p_params)
+{
+ struct qed_load_req_out_params out_params;
+ struct qed_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = QED_VERSION;
+ in_params.drv_ver_1 = qed_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc)
+ return rc;
+
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
- /* If MFW refused (e.g. other port is in diagnostic mode) we
- * must abort. This can happen in the following cases:
- * - Other port is in diagnostic mode
- * - Previously loaded function on the engine is not compliant with
- * the requester.
- * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
- * -
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
*/
- if (!(*p_load_code) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
- DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ if (qed_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+ DP_NOTICE(p_hwfn,
+ "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+ qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return -EBUSY;
+ }
+ }
+
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
+ */
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ */
+ DP_NOTICE(p_hwfn,
+ "PF is already loaded\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
return -EBUSY;
}
+ p_params->load_code = out_params.load_code;
+
return 0;
}
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ switch (p_hwfn->cdev->wol_config) {
+ case QED_OV_WOL_DISABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown WoL configuration %02x\n",
+ p_hwfn->cdev->wol_config);
+ /* Fallthrough */
+ case QED_OV_WOL_DEFAULT:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+ }
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ /* Set the primary MAC if WoL is enabled */
+ if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
+ u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+ memset(&wol_mac, 0, sizeof(wol_mac));
+ wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+ wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+ p_mac[4] << 8 | p_mac[5];
+
+ DP_VERBOSE(p_hwfn,
+ (QED_MSG_SP | NETIF_MSG_IFDOWN),
+ "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+ p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
+
+ mb_params.p_data_src = &wol_mac;
+ mb_params.data_src_size = sizeof(wol_mac);
+ }
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
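
For reference, the mcp_mac layout splits the MAC into a 16-bit upper half and a 32-bit lower half, exactly as the two shift expressions above build it. A runnable user-space illustration (the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Packs aa:bb:cc:dd:ee:ff into {0x0000aabb, 0xccddeeff}, matching the
 * mcp_mac fields filled in qed_mcp_unload_done(). */
static void pack_wol_mac(const uint8_t *mac, uint32_t *upper, uint32_t *lower)
{
	*upper = mac[0] << 8 | mac[1];
	*lower = (uint32_t)mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	uint32_t upper, lower;

	pack_wol_mac(mac, &upper, &lower);
	printf("[%08x,%08x]\n", upper, lower);	/* [0000aabb,ccddeeff] */
	return 0;
}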
+
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -549,7 +1042,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
int rc;
int i;
@@ -560,8 +1052,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
@@ -744,33 +1236,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *phy_cfg;
+ struct eth_phy_cfg phy_cfg;
int rc = 0;
u32 cmd;
/* Set the shmem configuration according to params */
- phy_cfg = &union_data.drv_phy_cfg;
- memset(phy_cfg, 0, sizeof(*phy_cfg));
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- phy_cfg->speed = params->speed.forced_speed;
- phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- phy_cfg->adv_speed = params->speed.advertised_speeds;
- phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg->speed,
- phy_cfg->pause,
- phy_cfg->adv_speed,
- phy_cfg->loopback_mode,
- phy_cfg->feature_config_flags);
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
@@ -778,7 +1268,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
@@ -805,7 +1296,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
enum qed_mcp_protocol_type stats_type;
union qed_mcp_protocol_stats stats;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
switch (type) {
@@ -835,8 +1325,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- memcpy(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
@@ -963,7 +1453,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
default:
- DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
}
}
@@ -1316,24 +1806,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
__be32 val;
u32 i;
int rc;
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
-
+ memset(&drv_version, 0, sizeof(drv_version));
+ drv_version.version = p_ver->version;
for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
- *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1450,7 +1939,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *mac)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u32 mfw_mac[2];
int rc;
memset(&mb_params, 0, sizeof(mb_params));
@@ -1458,8 +1947,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
DRV_MSG_CODE_VMAC_TYPE_SHIFT;
mb_params.param |= MCP_PF_ID(p_hwfn);
- ether_addr_copy(&union_data.raw_data[0], mac);
- mb_params.p_data_src = &union_data;
+
+ /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+ * in 32-bit granularity.
+ * So the MAC has to be set in native order [and not byte order],
+ * otherwise it would be read incorrectly by MFW after swap.
+ */
+ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+ mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+ mb_params.p_data_src = (u8 *)mfw_mac;
+ mb_params.data_src_size = 8;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
@@ -1724,52 +2222,426 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
return rc;
}
-#define QED_RESC_ALLOC_VERSION_MAJOR 1
+static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case QED_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case QED_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case QED_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case QED_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case QED_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case QED_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case QED_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case QED_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case QED_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR 2
#define QED_RESC_ALLOC_VERSION_MINOR 0
#define QED_RESC_ALLOC_VERSION \
((QED_RESC_ALLOC_VERSION_MAJOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
(QED_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param)
+
+struct qed_resc_alloc_in_params {
+ u32 cmd;
+ enum qed_resources res_id;
+ u32 resc_max_val;
+};
+
+struct qed_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static int
+qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_alloc_in_params *p_in_params,
+ struct qed_resc_alloc_out_params *p_out_params)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct resource_info mfw_resc_info;
int rc;
+ memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id));
+ return -EINVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return -EINVAL;
+ }
+
memset(&mb_params, 0, sizeof(mb_params));
- memset(&union_data, 0, sizeof(union_data));
- mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.cmd = p_in_params->cmd;
mb_params.param = QED_RESC_ALLOC_VERSION;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
- /* Need to have a sufficient large struct, as the cmd_and_union
- * is going to do memcpy from and to it.
- */
- memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd,
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
- mb_params.p_data_src = &union_data;
- mb_params.p_data_dst = &union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
- /* Copy the data back */
- memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
- *p_mcp_resp = mb_params.mcp_resp;
- *p_mcp_param = mb_params.mcp_param;
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num,
+ p_out_params->resc_start,
+ p_out_params->vf_resc_num,
+ p_out_params->vf_resc_start, p_out_params->flags);
+
+ return 0;
+}
+
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return 0;
+}
+
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
+
+ return 0;
+}
+
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return -EINVAL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int
+__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ switch (p_params->timeout) {
+ case QED_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case QED_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
- "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
- *p_mcp_param,
- p_resc_info->res_id,
- p_resc_info->size,
- p_resc_info->offset,
- p_resc_info->vf_size,
- p_resc_info->vf_offset, p_resc_info->flags);
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
return 0;
}
+
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ int rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ msleep(retry_interval_in_ms);
+ } else {
+ udelay(p_params->retry_interval);
+ }
+ }
+
+ rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return 0;
+}
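
The retry interval is given in microseconds; sleeping callers round it up to whole milliseconds for msleep() with DIV_ROUND_UP, while atomic callers udelay() it as-is. A tiny stand-alone illustration using the default interval from QED_MCP_RESC_LOCK_RETRY_VAL_DFLT:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int retry_interval_us = 10000;	/* default retry interval */

	/* 10000 us rounds up to msleep(10); e.g. 10001 us would give 11. */
	printf("msleep(%u)\n", DIV_ROUND_UP(retry_interval_us, 1000));
	return 0;
}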
+
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+				    enum qed_resc_lock resource,
+				    bool b_is_permanent)
+{
+ if (p_lock) {
+ memset(p_lock, 0, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once since it's
+ * unexpected another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock) {
+ memset(p_unlock, 0, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
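
A stand-alone model of this helper's policy (the structure is trimmed to the fields it touches; the constants mirror the defines in qed_mcp.h):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TO_NONE   255	/* QED_MCP_RESC_LOCK_TO_NONE */
#define RETRY_CNT 10	/* QED_MCP_RESC_LOCK_RETRY_CNT_DFLT */
#define RETRY_VAL 10000	/* QED_MCP_RESC_LOCK_RETRY_VAL_DFLT */

struct lock_params {	/* trimmed qed_resc_lock_params */
	unsigned char resource, timeout, retry_num;
	unsigned short retry_interval;
	bool sleep_b4_retry;
};

/* Same policy as qed_mcp_resc_lock_default_init(): permanent locks get
 * no aging and a single attempt; transient ones retry with sleeps. */
static void default_init(struct lock_params *p, unsigned char res, bool perm)
{
	memset(p, 0, sizeof(*p));
	if (perm) {
		p->timeout = TO_NONE;
	} else {
		p->retry_num = RETRY_CNT;
		p->retry_interval = RETRY_VAL;
		p->sleep_b4_retry = true;
	}
	p->resource = res;
}

int main(void)
{
	struct lock_params p;

	default_init(&p, 5, false);
	printf("retries=%d every %d us\n", p.retry_num, p.retry_interval);
	return 0;
}

qed_ptp_res_lock() further down is a real caller: it passes b_is_permanent = true, since the PTP resource stays held rather than aging out.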
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 368e88de146c..5ae35d6cc7d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
+#include "qed_dev_api.h"
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -479,14 +480,18 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * \
+ qed_device_num_engines((_p_hwfn)->cdev)))
+
struct qed_mcp_info {
- /* Spinlock used for protecting the access to the MFW mailbox */
- spinlock_t lock;
+ /* List for mailbox commands which were sent and wait for a response */
+ struct list_head cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ spinlock_t cmd_lock;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
@@ -506,14 +511,16 @@ struct qed_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
- u16 mcp_hist;
+ u32 mcp_hist;
};
struct qed_mcp_mb_params {
u32 cmd;
u32 param;
- union drv_union_data *p_data_src;
- union drv_union_data *p_data_dst;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
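
The reworked struct replaces the old union pointers with void pointers plus explicit sizes, and qed_mcp_cmd_and_union() rejects any buffer larger than union drv_union_data. A stand-alone sketch of that contract (the union size and command code below are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UNION_DATA_SIZE 32	/* stand-in for sizeof(union drv_union_data) */

struct mb_params {		/* shape of qed_mcp_mb_params */
	uint32_t cmd, param;
	void *p_data_src;  uint8_t data_src_size;
	void *p_data_dst;  uint8_t data_dst_size;
	uint32_t mcp_resp, mcp_param;
};

/* The size check qed_mcp_cmd_and_union() now performs up front. */
static int check_sizes(const struct mb_params *p)
{
	if (p->data_src_size > UNION_DATA_SIZE ||
	    p->data_dst_size > UNION_DATA_SIZE)
		return -1;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	uint8_t rsp[16];
	struct mb_params mb;

	memset(&mb, 0, sizeof(mb));
	mb.cmd = 0x1000;	/* illustrative command code */
	mb.p_data_dst = rsp;
	mb.data_dst_size = sizeof(rsp);
	printf("sizes ok: %d\n", check_sizes(&mb) == 0);
	return 0;
}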
@@ -564,27 +571,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
+enum qed_drv_role {
+ QED_DRV_ROLE_OS,
+ QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+ /* Input params */
+ enum qed_drv_role drv_role;
+ u8 timeout_val;
+ bool avoid_eng_reset;
+ enum qed_override_force_load override_force_load;
+
+ /* Output params */
+ u32 load_code;
+};
+
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- * succeed, returns whether this PF is the first on the
- * chip/engine/port or function. This function should be
- * called when driver is ready to accept MFW events after
- * Storms initializations are done.
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ *        returns whether this PF is the first on the engine, the port, or only the function.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- * of the following:
- * FW_MSG_CODE_DRV_LOAD_ENGINE
- * FW_MSG_CODE_DRV_LOAD_PORT
- * FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *p_load_code);
+ struct qed_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -708,6 +743,41 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
+/**
* @brief Send eswitch mode to MFW
*
* @param p_hwfn
@@ -720,19 +790,106 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
+#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
+#define QED_MCP_RESC_LOCK_MAX_VAL 31
+
+enum qed_resc_lock {
+ QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
+ QED_RESC_LOCK_PTP_PORT0,
+ QED_RESC_LOCK_PTP_PORT1,
+ QED_RESC_LOCK_PTP_PORT2,
+ QED_RESC_LOCK_PTP_PORT3,
+ QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
+ QED_RESC_LOCK_RESC_INVALID
+};
+
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * @brief - Initiates PF FLR
*
* @param p_hwfn
* @param p_ptt
- * @param p_resc_info - descriptor of requested resource
- * @param p_mcp_resp
- * @param p_mcp_param
*
* @return int - 0 - operation was successful.
*/
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param);
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+struct qed_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
+#define QED_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
+
+struct qed_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+	/* Allow releasing a resource even if it belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params);
+
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; can be NULL
+ * @param p_unlock - unlock params struct to be initialized; can be NULL
+ * @param resource - the requested resource
+ * @param b_is_permanent - disable retries & aging when set
+ */
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+                                    enum qed_resc_lock resource,
+                                    bool b_is_permanent);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 378afce58b3f..db96670192c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -41,6 +41,7 @@
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_ooo.h"
+#include "qed_cxt.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
@@ -48,15 +49,18 @@ static struct qed_ooo_archipelago
*p_ooo_info,
u32 cid)
{
- struct qed_ooo_archipelago *p_archipelago = NULL;
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+ struct qed_ooo_archipelago *p_archipelago;
- list_for_each_entry(p_archipelago,
- &p_ooo_info->archipelagos_list, list_entry) {
- if (p_archipelago->cid == cid)
- return p_archipelago;
- }
+ if (idx >= p_ooo_info->max_num_archipelagos)
+ return NULL;
- return NULL;
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+ if (list_empty(&p_archipelago->isles_list))
+ return NULL;
+
+ return p_archipelago;
}
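
The list walk is gone: an archipelago is now located by arithmetic on the CID, and an empty isles_list means "no archipelago for this connection". A stand-alone sketch of the index computation, including the unsigned-wrap bounds check used above (sample values are arbitrary):

#include <stdio.h>

/* The low 16 bits of the CID, rebased on the per-protocol cid_base,
 * index p_archipelagos_mem directly; a CID below the base wraps to a
 * huge unsigned value and fails the single range check. */
static int cid_to_idx(unsigned int cid, unsigned short cid_base,
		      unsigned short max_num)
{
	unsigned int idx = (cid & 0xffff) - cid_base;

	return idx >= max_num ? -1 : (int)idx;
}

int main(void)
{
	printf("%d\n", cid_to_idx(0x20010, 0x8, 64));	/* 8: in range */
	printf("%d\n", cid_to_idx(0x20004, 0x8, 64));	/* -1: below base */
	return 0;
}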
static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
@@ -97,8 +101,8 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
+ u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
- u16 max_num_archipelagos = 0;
u16 max_num_isles = 0;
u32 i;
@@ -110,6 +114,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+ cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
@@ -121,11 +126,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info)
return NULL;
+ p_ooo_info->cid_base = cid_base;
+ p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
- INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
- INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
sizeof(struct qed_ooo_isle),
@@ -146,11 +152,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->p_archipelagos_mem)
goto no_archipelagos_mem;
- for (i = 0; i < max_num_archipelagos; i++) {
+ for (i = 0; i < max_num_archipelagos; i++)
INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
- list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
p_ooo_info->ooo_history.p_cqes =
kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
@@ -178,21 +181,9 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
- bool b_found = false;
-
- if (list_empty(&p_ooo_info->archipelagos_list))
- return;
- list_for_each_entry(p_archipelago,
- &p_ooo_info->archipelagos_list, list_entry) {
- if (p_archipelago->cid == cid) {
- list_del(&p_archipelago->list_entry);
- b_found = true;
- break;
- }
- }
-
- if (!b_found)
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (!p_archipelago)
return;
while (!list_empty(&p_archipelago->isles_list)) {
@@ -216,27 +207,21 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
-
- list_add_tail(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
}
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
- struct qed_ooo_archipelago *p_arch;
+ struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
+ u32 i;
- while (!list_empty(&p_ooo_info->archipelagos_list)) {
- p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
- struct qed_ooo_archipelago,
- list_entry);
-
- list_del(&p_arch->list_entry);
+ for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
+ p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
- while (!list_empty(&p_arch->isles_list)) {
- p_isle = list_first_entry(&p_arch->isles_list,
+ while (!list_empty(&p_archipelago->isles_list)) {
+ p_isle = list_first_entry(&p_archipelago->isles_list,
struct qed_ooo_isle,
list_entry);
@@ -258,8 +243,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
- list_add_tail(&p_arch->list_entry,
- &p_ooo_info->free_archipelagos_list);
}
if (!list_empty(&p_ooo_info->ready_buffers_list))
list_splice_tail_init(&p_ooo_info->ready_buffers_list,
@@ -378,12 +361,6 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
p_ooo_info->cur_isles_number--;
list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
}
-
- if (list_empty(&p_archipelago->isles_list)) {
- list_del(&p_archipelago->list_entry);
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
}
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
@@ -426,28 +403,10 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
return;
}
- if (!p_archipelago &&
- !list_empty(&p_ooo_info->free_archipelagos_list)) {
- p_archipelago =
- list_first_entry(&p_ooo_info->free_archipelagos_list,
- struct qed_ooo_archipelago, list_entry);
+ if (!p_archipelago) {
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
- list_del(&p_archipelago->list_entry);
- if (!list_empty(&p_archipelago->isles_list)) {
- DP_NOTICE(p_hwfn,
- "Free OOO connection is not empty\n");
- INIT_LIST_HEAD(&p_archipelago->isles_list);
- }
- p_archipelago->cid = cid;
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->archipelagos_list);
- } else if (!p_archipelago) {
- DP_NOTICE(p_hwfn, "No more free OOO connections\n");
- list_add(&p_isle->list_entry,
- &p_ooo_info->free_isles_list);
- list_add(&p_buffer->list_entry,
- &p_ooo_info->free_buffers_list);
- return;
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
}
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -517,11 +476,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
} else {
list_splice_tail_init(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
- if (list_empty(&p_archipelago->isles_list)) {
- list_del(&p_archipelago->list_entry);
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
}
list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 4f138fb5f533..791ad0f8b759 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -60,9 +60,7 @@ struct qed_ooo_isle {
};
struct qed_ooo_archipelago {
- struct list_head list_entry;
struct list_head isles_list;
- u32 cid;
};
struct qed_ooo_history {
@@ -75,14 +73,14 @@ struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
- struct list_head free_archipelagos_list;
- struct list_head archipelagos_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
+ u16 max_num_archipelagos;
+ u16 cid_base;
};
#if IS_ENABLED(CONFIG_QED_ISCSI)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index d27aa85da23c..434a164a76ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -34,7 +34,7 @@
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
-#include "qed_ptp.h"
+#include "qed_mcp.h"
#include "qed_reg_addr.h"
/* 16 nano second time quantas to wait before making a Drift adjustment */
@@ -45,6 +45,82 @@
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
+static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
+{
+ switch (qed_device_get_port_id(p_hwfn->cdev)) {
+ case 0:
+ return QED_RESC_LOCK_PTP_PORT0;
+ case 1:
+ return QED_RESC_LOCK_PTP_PORT1;
+ case 2:
+ return QED_RESC_LOCK_PTP_PORT2;
+ case 3:
+ return QED_RESC_LOCK_PTP_PORT3;
+ default:
+ return QED_RESC_LOCK_RESC_INVALID;
+ }
+}
+
+static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_lock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(&params, NULL, resource, true);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* MFW doesn't support resource locking; the first PF on the port
+ * has lock ownership.
+ */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+ return 0;
+
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EBUSY;
+ } else if (!rc && !params.b_granted) {
+ DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
+ return -EBUSY;
+ }
+
+ return rc;
+}
+
+static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(NULL, &params, resource, true);
+
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
+ if (rc == -EINVAL) {
+ /* MFW doesn't support locking; the first PF has lock ownership */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+ rc = 0;
+ } else {
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EINVAL;
+ }
+ } else if (rc) {
+ DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
+ }
+
+ return rc;
+}
+
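qed_ptp_res_lock() above layers a fallback over the management-firmware (MFW) resource lock: when the firmware returns -EINVAL (no lock support), only the first PF on the port may own PTP. A self-contained sketch of that policy, with the MFW call mocked out — mfw_resc_lock() and the helper names are stand-ins for this sketch, not driver API:

#include <errno.h>
#include <stdbool.h>

enum resc_lock {
	LOCK_PTP_PORT0, LOCK_PTP_PORT1, LOCK_PTP_PORT2, LOCK_PTP_PORT3,
	LOCK_INVALID
};

static enum resc_lock port_to_resc(int port_id)
{
	if (port_id < 0 || port_id > 3)
		return LOCK_INVALID;
	return (enum resc_lock)(LOCK_PTP_PORT0 + port_id);
}

/* Mock of the MFW call; -EINVAL emulates firmware without lock support */
static int mfw_resc_lock(enum resc_lock resc, bool *granted)
{
	(void)resc;
	*granted = false;
	return -EINVAL;
}

static int ptp_res_lock(int port_id, int abs_pf_id, int pfs_per_port)
{
	enum resc_lock resc = port_to_resc(port_id);
	bool granted = false;
	int rc;

	if (resc == LOCK_INVALID)
		return -EINVAL;

	rc = mfw_resc_lock(resc, &granted);
	if (rc == -EINVAL)	/* legacy MFW: first PF on the port wins */
		return abs_pf_id < pfs_per_port ? 0 : -EBUSY;
	if (rc)
		return rc;
	return granted ? 0 : -EBUSY;	/* held by another function */
}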
/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
@@ -112,39 +188,73 @@ static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
}
/* Filter PTP protocol packets that need to be timestamped */
-static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
- enum qed_ptp_filter_type type)
+static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
+ enum qed_ptp_filter_type rx_type,
+ enum qed_ptp_hwtstamp_tx_type tx_type)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
- u32 rule_mask, parm_mask;
+ u32 rule_mask, enable_cfg = 0x0;
- switch (type) {
- case QED_PTP_FILTER_L2_IPV4_IPV6:
- parm_mask = 0x6AA;
- rule_mask = 0x3EEE;
+ switch (rx_type) {
+ case QED_PTP_FILTER_NONE:
+ enable_cfg = 0x0;
+ rule_mask = 0x3FFF;
break;
- case QED_PTP_FILTER_L2:
- parm_mask = 0x6BF;
- rule_mask = 0x3EFF;
+ case QED_PTP_FILTER_ALL:
+ enable_cfg = 0x7;
+ rule_mask = 0x3CAA;
break;
- case QED_PTP_FILTER_IPV4_IPV6:
- parm_mask = 0x7EA;
- rule_mask = 0x3FFE;
+ case QED_PTP_FILTER_V1_L4_EVENT:
+ enable_cfg = 0x3;
+ rule_mask = 0x3FFA;
break;
- case QED_PTP_FILTER_IPV4:
- parm_mask = 0x7EE;
+ case QED_PTP_FILTER_V1_L4_GEN:
+ enable_cfg = 0x3;
rule_mask = 0x3FFE;
break;
+ case QED_PTP_FILTER_V2_L4_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FAA;
+ break;
+ case QED_PTP_FILTER_V2_L4_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FEE;
+ break;
+ case QED_PTP_FILTER_V2_L2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CFF;
+ break;
+ case QED_PTP_FILTER_V2_L2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EFF;
+ break;
+ case QED_PTP_FILTER_V2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CAA;
+ break;
+ case QED_PTP_FILTER_V2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EEE;
+ break;
default:
- DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
return -EINVAL;
}
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);
+ if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
+ }
/* Reset possibly old timestamps */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
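The rx_type switch above pairs each hwtstamp filter with an enable_cfg/rule_mask register value. The same mapping in table form, assuming the filter enum declares these types contiguously in the order they appear in the switch; the values themselves are copied from the hunk:

#include <stddef.h>
#include <stdint.h>

struct ptp_filter_cfg {
	uint32_t enable_cfg;
	uint32_t rule_mask;
};

/* indexed by the QED_PTP_FILTER_* values, assumed declared in this order */
static const struct ptp_filter_cfg rx_filter_tbl[] = {
	{ 0x0, 0x3FFF },	/* NONE: nothing enabled, all rules off */
	{ 0x7, 0x3CAA },	/* ALL */
	{ 0x3, 0x3FFA },	/* V1_L4_EVENT */
	{ 0x3, 0x3FFE },	/* V1_L4_GEN */
	{ 0x5, 0x3FAA },	/* V2_L4_EVENT */
	{ 0x5, 0x3FEE },	/* V2_L4_GEN */
	{ 0x5, 0x3CFF },	/* V2_L2_EVENT */
	{ 0x5, 0x3EFF },	/* V2_L2_GEN */
	{ 0x5, 0x3CAA },	/* V2_EVENT */
	{ 0x5, 0x3EEE },	/* V2_GEN */
};

static const struct ptp_filter_cfg *ptp_rx_filter_lookup(unsigned int rx_type)
{
	if (rx_type >= sizeof(rx_filter_tbl) / sizeof(rx_filter_tbl[0]))
		return NULL;	/* unknown filter: caller returns -EINVAL */
	return &rx_filter_tbl[rx_type];
}

Note that Tx either mirrors the Rx enable_cfg/rule_mask or is fully masked off when tx_type is QED_PTP_HWTSTAMP_TX_OFF, which is exactly the if/else that follows the switch.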
@@ -248,7 +358,25 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
+ return -EBUSY;
+ }
+
+ p_hwfn->p_ptp_ptt = p_ptt;
+
+ rc = qed_ptp_res_lock(p_hwfn, p_ptt);
+ if (rc) {
+ DP_INFO(p_hwfn,
+ "Couldn't acquire the resource lock, skip ptp enable for this PF\n");
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+ return rc;
+ }
/* Reset PTP event detection rules - will be configured in the IOCTL */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
@@ -262,12 +390,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
/* Pause free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_AH(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
/* Resume free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+ }
/* Disable drift register */
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
@@ -281,22 +417,13 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
return 0;
}
-static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
-
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);
-
- return 0;
-}
-
static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ qed_ptp_res_unlock(p_hwfn, p_ptt);
+
/* Reset PTP event detection rules */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
@@ -308,12 +435,14 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+
return 0;
}
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
- .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
- .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
+ .cfg_filters = qed_ptp_hw_cfg_filters,
.read_rx_ts = qed_ptp_hw_read_rx_ts,
.read_tx_ts = qed_ptp_hw_read_tx_ts,
.read_cc = qed_ptp_hw_read_cc,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df60cd2..1ae73b2d6d1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -160,13 +160,13 @@
0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
-#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
0x2aa404UL
-#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
0x2aa408UL
-#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
0x2aa40cUL
-#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
0x2aa410UL
#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
0x2aa138UL
@@ -356,6 +356,10 @@
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+ 0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+ 0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
+#define NWS_REG_DBG_SELECT \
+ 0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+ 0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+ 0x700130UL
+#define NWS_REG_DBG_FORCE_VALID \
+ 0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME \
+ 0x700138UL
+#define MS_REG_DBG_SELECT \
+ 0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+ 0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+ 0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+ 0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+ 0x6a0238UL
#define PCIE_REG_DBG_COMMON_SELECT \
0x054398UL
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
0x000b48UL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+ 4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
#define NWS_REG_NWS_CMU \
@@ -1520,4 +1550,22 @@
#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b28591c..56289d7cd306 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,17 +66,31 @@
#include "qed_roce.h"
#include "qed_ll2.h"
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe)
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+ u16 icid =
+ (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+ /* icid release in this async event can occur only if the icid
+ * was offloaded to the FW. In case it wasn't offloaded this is
+ * handled in qed_roce_sp_destroy_qp.
+ */
+ qed_roce_free_real_icid(p_hwfn, icid);
+ } else {
+ struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
- p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
- p_eqe->opcode, &p_eqe->data);
+ events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+ fw_event_code,
+ &rdma_data->async_handle);
+ }
}
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 max_count)
+ struct qed_bmap *bmap, u32 max_count, char *name)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
@@ -90,43 +104,62 @@ static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
return -ENOMEM;
}
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
- bmap->bitmap);
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
return 0;
}
static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 *id_num)
{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
-
*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
-
- if (*id_num >= bmap->max_count) {
- DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
- bmap->max_count);
+ if (*id_num >= bmap->max_count)
return -EINVAL;
- }
__set_bit(*id_num, bmap->bitmap);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
return 0;
}
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
bool b_acquired;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
if (id_num >= bmap->max_count)
return;
b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
if (!b_acquired) {
- DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
return;
}
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -170,7 +203,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Queue zone lines are shared between RoCE and L2 in such a way that
* they can be used by each without obstructing the other.
*/
- p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
/* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -191,7 +225,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
}
/* Allocate bit map for pd's */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate pd_map, rc = %d\n",
@@ -201,7 +236,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate DPI bitmap */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
- p_hwfn->dpi_count);
+ p_hwfn->dpi_count, "DPI");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate DPI bitmap, rc = %d\n", rc);
@@ -212,7 +247,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
* twice the number of QPs.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2);
+ p_rdma_info->num_qps * 2, "CQ");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cq bitmap, rc = %d\n", rc);
@@ -224,7 +259,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
* The maximum number of CQs is bounded to twice the number of QPs.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2);
+ p_rdma_info->num_qps * 2, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate toogle bits, rc = %d\n", rc);
@@ -233,7 +268,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate bitmap for itids */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
- p_rdma_info->num_mrs);
+ p_rdma_info->num_mrs, "MR");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate itids bitmaps, rc = %d\n", rc);
@@ -241,16 +276,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
}
/* Allocate bitmap for cids used for qps. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cid bitmap, rc = %d\n", rc);
goto free_tid_map;
}
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
@@ -271,16 +317,79 @@ free_rdma_info:
return rc;
}
+static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ int last_line = bmap->max_count / (64 * 8);
+ int last_item = last_line * 8 +
+ DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+ u64 *pmap = (u64 *)bmap->bitmap;
+ int line, item, offset;
+ u8 str_last_line[200] = { 0 };
+
+ if (!weight || !check)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ /* print aligned non-zero lines, if any */
+ for (item = 0, line = 0; line < last_line; line++, item += 8)
+ if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ line,
+ pmap[item],
+ pmap[item + 1],
+ pmap[item + 2],
+ pmap[item + 3],
+ pmap[item + 4],
+ pmap[item + 5],
+ pmap[item + 6], pmap[item + 7]);
+
+ /* print last unaligned non-zero line, if any */
+ if ((bmap->max_count % (64 * 8)) &&
+ (bitmap_weight((unsigned long *)&pmap[item],
+ bmap->max_count - item * 64))) {
+ offset = sprintf(str_last_line, "line 0x%04x: ", line);
+ for (; item < last_item; item++)
+ offset += sprintf(str_last_line + offset,
+ "0x%016llx ", pmap[item]);
+ DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ }
+
+end:
+ kfree(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
+
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ int wait_count = 0;
- kfree(p_rdma_info->cid_map.bitmap);
- kfree(p_rdma_info->tid_map.bitmap);
- kfree(p_rdma_info->toggle_bits.bitmap);
- kfree(p_rdma_info->cq_map.bitmap);
- kfree(p_rdma_info->dpi_map.bitmap);
- kfree(p_rdma_info->pd_map.bitmap);
+ /* When destroying a RoCE QP, control is returned to the user after
+ * the synchronous part. The asynchronous part may take a little longer.
+ * We delay for a short while if an async destroy QP is still expected.
+ * Beyond the added delay we clear the bitmap anyway.
+ */
+ while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ msleep(100);
+ if (wait_count++ > 20) {
+ DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+ break;
+ }
+ }
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
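The comment in the hunk above explains the bounded wait: destroy-QP completion is partly asynchronous, so teardown polls briefly for outstanding real cids and then frees the bitmaps regardless. A hedged userspace sketch of that loop, with nanosleep standing in for msleep and the pending check stubbed out:

#include <stdbool.h>
#include <time.h>

/* stub; the driver checks bitmap_weight() on the real_cid map instead */
static bool async_destroys_pending(void)
{
	return false;
}

static void wait_for_async_destroys(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	int wait_count = 0;

	while (async_destroys_pending()) {
		nanosleep(&ts, NULL);	/* ~100ms per iteration */
		if (wait_count++ > 20)	/* ~2s cap, then free anyway */
			break;
	}
}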
@@ -675,6 +784,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
((out_params->dpi) * p_hwfn->dpi_size);
out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
return rc;
@@ -693,6 +803,8 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
return p_port;
}
@@ -724,6 +836,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
u32 addr;
p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -737,9 +857,12 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
struct qed_dev_rdma_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
memset(info, 0, sizeof(*info));
info->rdma_type = QED_RDMA_TYPE_ROCE;
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
qed_fill_dev_info(cdev, &info->common);
@@ -887,8 +1010,7 @@ static int qed_rdma_create_cq(void *rdma_cxt,
/* Allocate icid */
spin_lock_bh(&p_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_info->cq_map, &returned_id);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
spin_unlock_bh(&p_info->lock);
if (rc) {
@@ -1080,6 +1202,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,15 +1269,22 @@ err:
return rc;
}
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1366,16 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
- memset(&qm_params, 0, sizeof(qm_params));
- qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1391,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
- rc, physical_queue0);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "rc = %d regular physical queue = 0x%x\n", rc,
+ regular_latency_queue);
if (rc)
goto err;
qp->resp_offloaded = true;
+ qp->cq_prod = 0;
+
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn, qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1277,10 +1421,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1495,16 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
- p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
- qp->sq_cq_id);
+ p_ramrod->cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
- memset(&qm_params, 0, sizeof(qm_params));
- qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1523,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
goto err;
qp->req_offloaded = true;
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn,
+ qp->icid + 1 -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1577,7 +1726,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
- u32 *num_invalidated_mw)
+ u32 *num_invalidated_mw,
+ u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1738,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
- if (!qp->resp_offloaded)
+ *num_invalidated_mw = 0;
+ *cq_prod = qp->cq_prod;
+
+ if (!qp->resp_offloaded) {
+ /* If a responder was never offloaded, we need to free the cids
+ * allocated in create_qp, as a FW async event will never arrive
+ */
+ u32 cid;
+
+ cid = qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
return 0;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1788,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+ *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+ qp->cq_prod = *cq_prod;
/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1993,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
out_params->draining = false;
- if (rq_err_state)
+ if (rq_err_state || sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
- else if (sq_err_state)
- qp->cur_state = QED_ROCE_QP_STATE_SQE;
else if (sq_draining)
out_params->draining = true;
out_params->state = qp->cur_state;
@@ -1849,10 +2013,9 @@ err_resp:
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
- u32 start_cid;
+ u32 cq_prod;
int rc;
/* Destroys the specified QP */
@@ -1866,7 +2029,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
+ &num_invalidated_mw,
+ &cq_prod);
if (rc)
return rc;
@@ -1881,21 +2045,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
-
- spin_lock_bh(&p_rdma_info->lock);
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_rdma_info->proto);
-
- /* Release responder's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid - start_cid);
-
- /* Release requester's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid + 1 - start_cid);
-
- spin_unlock_bh(&p_rdma_info->lock);
}
return 0;
@@ -2097,8 +2246,7 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
params->modify_flags);
return rc;
- } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
- qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
/* ->ERR */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
params->modify_flags);
@@ -2110,12 +2258,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
/* Any state -> RESET */
+ u32 cq_prod;
+
+ /* Send destroy responder ramrod */
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+ qp,
+ &num_invalidated_mw,
+ &cq_prod);
- rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
if (rc)
return rc;
+ qp->cq_prod = cq_prod;
+
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
@@ -2357,6 +2512,8 @@ qed_rdma_register_tid(void *rdma_cxt,
}
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
if (fw_return_code != RDMA_RETURN_OK) {
DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
@@ -2454,6 +2611,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
return rc;
}
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 start_cid, cid, xcid;
+
+ /* An even icid belongs to a responder while an odd icid belongs to a
+ * requester. The 'cid' received as an input can be either. We calculate
+ * the "partner" icid and call it xcid. Only when both are free can the
+ * "cid" map be cleared.
+ */
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+ cid = icid - start_cid;
+ xcid = cid ^ 1;
+
+ spin_lock_bh(&p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+ if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+ }
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
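qed_roce_free_real_icid() relies on icids being allocated in responder/requester pairs, so the partner of relative cid n is simply n ^ 1. A toy sketch of the pairing rule, with plain bool arrays standing in for the two bitmaps and the 64-entry capacity picked for the demo:

#include <stdbool.h>
#include <stdint.h>

static bool real_cid[64];	/* stand-in for the real_cid bitmap */
static bool cid_pair[64];	/* stand-in for the cid bitmap */

static void free_real_icid(uint32_t icid, uint32_t start_cid)
{
	uint32_t cid = icid - start_cid;
	uint32_t xcid = cid ^ 1;	/* responder <-> requester partner */

	if (cid >= 64)
		return;
	real_cid[cid] = false;
	if (!real_cid[xcid]) {		/* both halves gone: free the pair */
		cid_pair[cid] = false;
		cid_pair[xcid] = false;
	}
}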
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
return QED_LEADING_HWFN(cdev);
@@ -2773,7 +2955,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2ab7fa..9742af516183 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -67,9 +67,11 @@ enum qed_rdma_toggle_bit {
QED_RDMA_TOGGLE_BIT_SET = 1
};
+#define QED_RDMA_MAX_BMAP_NAME (10)
struct qed_bmap {
unsigned long *bitmap;
u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
};
struct qed_rdma_info {
@@ -82,6 +84,7 @@ struct qed_rdma_info {
struct qed_bmap qp_map;
struct qed_bmap srq_map;
struct qed_bmap cid_map;
+ struct qed_bmap real_cid_map;
struct qed_bmap dpi_map;
struct qed_bmap toggle_bits;
struct qed_rdma_events events;
@@ -92,6 +95,7 @@ struct qed_rdma_info {
u32 num_qps;
u32 num_mrs;
u16 queue_zone_base;
+ u16 max_queue_zones;
enum protocol_type proto;
};
@@ -153,6 +157,7 @@ struct qed_rdma_qp {
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
+ u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@@ -163,8 +168,8 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
@@ -187,7 +192,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u16 src_mac_addr_lo, bool b_last_packet);
#else
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ union rdma_eqe_data *rdma_data) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 30393ffaa8e5..3357bbefa445 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -84,6 +84,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -408,7 +409,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch);
/**
@@ -441,7 +442,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 6fb80f9ef446..bc3694e91b85 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -111,7 +111,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
-static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case QED_TUNN_CLSS_MAC_VLAN:
@@ -122,206 +122,201 @@ static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
return TUNNEL_CLSS_INNER_MAC_VLAN;
case QED_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
+ case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
}
static void
-qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src, bool b_pf_start)
{
- unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
- unsigned long update_mask = p_src->tunn_mode_update_mask;
- unsigned long tunn_mode = p_src->tunn_mode;
- unsigned long new_tunn_mode = 0;
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
- }
-
- if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
- }
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
- if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
- }
-
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
- }
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
- p_src->tunn_mode = new_tunn_mode;
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
}
-static void
-qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
{
- unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
- qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
- p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
- p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+ type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = type;
+}
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
+static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
- }
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
+static void
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct qed_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
+static void
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct qed_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct qed_tunn_update_udp_port *p_udp_port)
+{
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = cpu_to_le16(p_udp_port->port);
}
+}
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+
+ qed_set_pf_update_tunn_mode(p_tun, p_src, false);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- unsigned long tunn_mode)
+ struct qed_tunnel_info *p_tun)
{
- u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
- u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- l2gre_enable = 1;
+ qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- ipgre_enable = 1;
-
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- vxlan_enable = 1;
-
- qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
- qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+ qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- l2geneve_enable = 1;
+static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ if (p_tunn->vxlan_port.b_update_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- ipgeneve_enable = 1;
+ if (p_tunn->geneve_port.b_update_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
- qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
- ipgeneve_enable);
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_src,
+ struct qed_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
- unsigned long tunn_mode;
- enum tunnel_clss type;
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
if (!p_src)
return;
- tunn_mode = p_src->tunn_mode;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
- }
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
-
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
-
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
-
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
- }
-
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+ qed_set_pf_update_tunn_mode(p_tun, p_src, true);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
}
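The rewritten tunnel configuration funnels all five tunnel types (VXLAN, L2/IP GENEVE, L2/IP GRE) through a pair of small helpers instead of per-mode bit tests. A reduced sketch of those helpers, with the structs trimmed to the fields the logic actually touches:

#include <stdbool.h>
#include <stdint.h>

struct tunn_update_type {
	bool b_update_mode;
	bool b_mode_enabled;
	uint8_t tun_cls;
};

/* On PF start every mode is taken from the source; on a later update
 * only the fields the caller flagged are copied. */
static void set_pf_update_tunn_mode(struct tunn_update_type *dst,
				    const struct tunn_update_type *src,
				    bool b_pf_start)
{
	if (src->b_update_mode || b_pf_start)
		dst->b_mode_enabled = src->b_mode_enabled;
}

/* One ramrod-field writer shared by all five tunnel types */
static void set_ramrod_tunnel_param(uint8_t *p_tunn_cls,
				    uint8_t *p_enable_tx_clas,
				    const struct tunn_update_type *t)
{
	*p_tunn_cls = t->tun_cls;
	if (t->b_mode_enabled)
		*p_enable_tx_clas = 1;
}

This is why qed_sp_pf_start() and qed_sp_pf_update_tunn_cfg() collapse to the same call sequence over &cdev->tunnel in the hunks above.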
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -416,11 +411,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (p_tunn) {
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
- }
+ if (p_tunn)
+ qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
return rc;
}
@@ -451,7 +443,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
@@ -459,6 +451,12 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (!p_tunn)
+ return -EINVAL;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
@@ -479,15 +477,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (p_tunn->update_vxlan_udp_port)
- qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
-
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
- p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a9f0cf..f6423a139ca0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -119,6 +119,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct qed_spq_comp_done *comp_done;
+ struct qed_ptt *p_ptt;
int rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@@ -135,8 +136,14 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
if (!rc)
return 0;
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
+ return -EAGAIN;
+ }
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
- rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@@ -145,15 +152,18 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
- return 0;
+ goto out;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- if (comp_done->done == 1) {
+ if (comp_done->done == 1)
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
- return 0;
- }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
err:
+ qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
@@ -205,11 +215,10 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- u16 pq;
- struct qed_cxt_info cxt_info;
- struct core_conn_context *p_cxt;
- union qed_qm_pq_params pq_params;
- int rc;
+ struct core_conn_context *p_cxt;
+ struct qed_cxt_info cxt_info;
+ u16 physical_q;
+ int rc;
cxt_info.iid = p_spq->cid;
@@ -231,10 +240,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -296,9 +303,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
case PROTOCOLID_ROCE:
- qed_async_roce_event(p_hwfn, p_eqe);
+ qed_roce_async_event(p_hwfn, p_eqe->opcode,
+ &p_eqe->data.rdma_data);
return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -306,14 +316,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
- if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
- u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
- qed_ooo_release_connection_isles(p_hwfn,
- p_hwfn->p_ooo_info,
- cid);
- return 0;
- }
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 253c2bbe1e4e..d5df29f787c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -178,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+enum qed_iov_validate_q_mode {
+ QED_IOV_VALIDATE_Q_NA,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ u16 qid,
+ enum qed_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ if (mode == QED_IOV_VALIDATE_Q_NA)
+ return true;
+
+ if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
+ (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+ return mode == QED_IOV_VALIDATE_Q_ENABLE;
+
+ /* In case we haven't found any valid cid, the queue is disabled */
+ return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
+
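qed_iov_validate_queue_mode() above turns queue validation into a tri-state check keyed off whether a cid has been attached to the queue: "don't care", "must be enabled", or "must be disabled". A self-contained sketch (struct fields are illustrative):

#include <stdbool.h>
#include <stddef.h>

enum validate_q_mode { Q_NA, Q_ENABLE, Q_DISABLE };

struct vf_queue {
	void *p_rx_cid;
	void *p_tx_cid;
};

static bool validate_queue_mode(const struct vf_queue *q,
				enum validate_q_mode mode, bool is_tx)
{
	bool active;

	if (mode == Q_NA)	/* caller doesn't care about the state */
		return true;

	active = is_tx ? q->p_tx_cid != NULL : q->p_rx_cid != NULL;
	return active ? mode == Q_ENABLE : mode == Q_DISABLE;
}

The start_rxq path then demands Q_DISABLE (the queue must not already be running), while the active-queue scans below demand Q_ENABLE before a vport teardown is trusted.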
static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf, u16 rx_qid)
+ struct qed_vf_info *p_vf,
+ u16 rx_qid,
+ enum qed_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}
static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf, u16 tx_qid)
+ struct qed_vf_info *p_vf,
+ u16 tx_qid,
+ enum qed_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}
static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
@@ -217,6 +250,34 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
@@ -557,14 +618,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine VFs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on the next device,
+ * so offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is reached only by the latter
+ * to differentiate between the two.
*/
- cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (QED_PATH_ID(p_hwfn))
- cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"First VF in hwfn 0x%08x\n",
@@ -677,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
int rc;
+ /* It's possible the VF was previously considered malicious -
+ * clear the indication even if we're only going to disable the VF.
+ */
+ vf->b_malicious = false;
+
if (vf->to_disable)
return 0;
@@ -689,9 +771,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
@@ -1118,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
&params);
- qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
- mbx->req_virt->first_tlv.reply_address,
- sizeof(u64) / 4, &params);
-
+ /* Once PF copies the rc to the VF, the latter can continue
+ * and send an additional message. So we have to make sure the
+ * channel is set back to ready before that.
+ */
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
}
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
@@ -1733,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
@@ -1746,7 +1831,6 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
- qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1803,6 +1887,16 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (qed_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn,
+ "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
+ }
+
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
@@ -1814,6 +1908,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->configured_features = 0;
memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
@@ -1870,7 +1965,8 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
req = &mbx->req_virt->start_rxq;
- if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
@@ -1923,6 +2019,220 @@ out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
+static void
+qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct qed_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ enum qed_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & BIT(mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & BIT(mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ struct qed_tunn_update_udp_port *p_port,
+ enum qed_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
+{
+ if (tun->b_update_mode && !tun->b_mode_enabled) {
+ tun->b_update_mode = false;
+ *rc = -EINVAL;
+ }
+}
+
+static int
+qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
+ u16 *tun_features, bool *update,
+ struct qed_tunnel_info *tun_src)
+{
+ struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
+ struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
+ u16 bultn_vxlan_port, bultn_geneve_port;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ int i, rc = 0;
+
+ *tun_features = p_hwfn->cdev->tunn_feature_mask;
+ bultn_vxlan_port = tun->vxlan_port.port;
+ bultn_geneve_port = tun->geneve_port.port;
+ qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
+
+ if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
+ (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
+ tun_src->b_update_rx_cls = false;
+ tun_src->b_update_tx_cls = false;
+ rc = -EINVAL;
+ }
+
+ if (tun_src->vxlan_port.b_update_port) {
+ if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
+ tun_src->vxlan_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_vxlan_port = tun_src->vxlan_port.port;
+ }
+ }
+
+ if (tun_src->geneve_port.b_update_port) {
+ if (tun_src->geneve_port.port == tun->geneve_port.port) {
+ tun_src->geneve_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_geneve_port = tun_src->geneve_port.port;
+ }
+ }
+
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
+ bultn_geneve_port);
+ }
+
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct qed_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i, rc = 0;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ memset(&tunn, 0, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!qed_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ QED_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
+
+ /* Even if the PF modifies the VF's request, it should still
+ * return an error in case of a partial configuration or a
+ * configuration modified from the requested one.
+ */
+ rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ /* Does the QED client want to update anything? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
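A minimal sketch of the update-mask convention these tunnel TLVs use, with illustrative stand-in names rather than the driver's real types: one bitfield marks which tunnel modes to touch, a second carries the requested on/off state.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

enum tunn_mode { MODE_VXLAN, MODE_L2GENEVE, MODE_IPGENEVE,
		 MODE_L2GRE, MODE_IPGRE };

struct tunn_req {
	uint8_t tun_mode_update_mask;	/* which modes to change */
	uint8_t tunn_mode;		/* desired state for those modes */
};

/* VF side: ask for VXLAN on/off, leaving the other modes untouched */
static void vf_request_vxlan(struct tunn_req *req, bool enable)
{
	req->tun_mode_update_mask |= BIT(MODE_VXLAN);
	if (enable)
		req->tunn_mode |= BIT(MODE_VXLAN);
}

/* PF side: decode one mode, mirroring __qed_iov_pf_update_tun_param() */
static void pf_apply_mode(const struct tunn_req *req, enum tunn_mode mode,
			  bool *update, bool *enabled)
{
	if (req->tun_mode_update_mask & BIT(mode)) {
		*update = true;
		if (req->tunn_mode & BIT(mode))
			*enabled = true;
	}
}

int main(void)
{
	struct tunn_req req = { 0, 0 };
	bool update = false, enabled = false;

	vf_request_vxlan(&req, true);
	pf_apply_mode(&req, MODE_VXLAN, &update, &enabled);
	printf("update=%d enabled=%d\n", update, enabled);
	return 0;
}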
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf, u8 status)
@@ -1970,21 +2280,16 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union qed_qm_pq_params pq_params;
struct vfpf_start_txq_tlv *req;
struct qed_vf_q_info *p_queue;
int rc;
u16 pq;
- /* Prepare the parameters which would choose the right PQ */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
-
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
@@ -2004,7 +2309,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
if (!p_queue->p_tx_cid)
goto out;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
req->pbl_addr, req->pbl_size, pq);
if (rc) {
@@ -2021,57 +2326,53 @@ out:
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
- u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+ u16 rxq_id, bool cqe_completion)
{
struct qed_vf_q_info *p_queue;
int rc = 0;
- int qid;
- if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
+ vf->relative_vf_id, rxq_id);
return -EINVAL;
+ }
- for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- p_queue = &vf->vf_queues[qid];
+ p_queue = &vf->vf_queues[rxq_id];
- if (!p_queue->p_rx_cid)
- continue;
-
- rc = qed_eth_rx_queue_stop(p_hwfn,
- p_queue->p_rx_cid,
- false, cqe_completion);
- if (rc)
- return rc;
+ rc = qed_eth_rx_queue_stop(p_hwfn,
+ p_queue->p_rx_cid,
+ false, cqe_completion);
+ if (rc)
+ return rc;
- vf->vf_queues[qid].p_rx_cid = NULL;
- vf->num_active_rxqs--;
- }
+ p_queue->p_rx_cid = NULL;
+ vf->num_active_rxqs--;
- return rc;
+ return 0;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+ struct qed_vf_info *vf, u16 txq_id)
{
- int rc = 0;
struct qed_vf_q_info *p_queue;
- int qid;
+ int rc = 0;
- if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
+ QED_IOV_VALIDATE_Q_ENABLE))
return -EINVAL;
- for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- p_queue = &vf->vf_queues[qid];
- if (!p_queue->p_tx_cid)
- continue;
+ p_queue = &vf->vf_queues[txq_id];
- rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
- if (rc)
- return rc;
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+ if (rc)
+ return rc;
- p_queue->p_tx_cid = NULL;
- }
+ p_queue->p_tx_cid = NULL;
- return rc;
+ return 0;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2080,20 +2381,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
int rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
*/
req = &mbx->req_virt->stop_rxqs;
- rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->num_rxqs, req->cqe_completion);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->cqe_completion);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
@@ -2104,19 +2413,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
int rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
*/
req = &mbx->req_virt->stop_txqs;
- rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
@@ -2141,22 +2458,17 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
/* Validate inputs */
- if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
- !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
- DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
- vf->relative_vf_id, req->rx_qid, req->num_rxqs);
- goto out;
- }
-
- for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
- if (!vf->vf_queues[qid].p_rx_cid) {
- DP_INFO(p_hwfn,
- "VF[%d] rx_qid = %d isn`t active!\n",
- vf->relative_vf_id, qid);
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+ if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid, req->num_rxqs);
goto out;
}
+ /* Prepare the handlers */
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
handlers[i] = vf->vf_queues[qid].p_rx_cid;
}
@@ -2372,7 +2684,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
for (i = 0; i < table_size; i++) {
q_idx = p_rss_tlv->rss_ind_table[i];
- if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+ if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Omitting RSS due to wrong queue %04x\n",
@@ -2381,15 +2694,6 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
goto out;
}
- if (!vf->vf_queues[q_idx].p_rx_cid) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "VF[%d]: Omitting RSS due to inactive queue %08x\n",
- vf->relative_vf_id, q_idx);
- b_reject = true;
- goto out;
- }
-
p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
}
@@ -3042,9 +3346,10 @@ qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return rc;
}
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
- u16 i, found = 0;
+ bool found = false;
+ u16 i;
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3054,7 +3359,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
if (!p_hwfn->cdev->p_iov_info) {
DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
- return 0;
+ return false;
}
/* Mark VFs */
@@ -3083,7 +3388,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
* VF flr until ACKs, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
- found = 1;
+ found = true;
}
}
@@ -3184,6 +3489,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_RELEASE:
qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -3289,11 +3597,17 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->err_id);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
@@ -3414,6 +3728,29 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3842,6 +4179,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
+ struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
struct qed_mcp_link_capabilities caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
@@ -3858,9 +4196,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
if (!vf_info)
continue;
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ /* Only hwfn0 is actually interested in the link speed, but
+ * since it alone receives the MFW indication of link, the
+ * configuration must be taken from it - otherwise things like
+ * rate limiting for a hwfn1 VF would not work.
+ */
+ memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+ sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
sizeof(caps));
/* Modify link according to the VF's configured link state */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index a89605821522..81a497ce6585 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -270,6 +270,9 @@ enum qed_iov_wq_flag {
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port);
+
/**
* @brief Read sriov related information and allocated resources
* reads from configuration space, shmem, etc.
@@ -348,9 +351,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's VFs was FLRed, false otherwise.
*/
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
@@ -378,6 +381,12 @@ static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
return MAX_NUM_VFS;
}
+static inline void
+qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port)
+{
+}
+
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
@@ -407,10 +416,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
- u32 *disabled_vfs)
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
{
- return 0;
+ return false;
}
static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 15d2855ec563..11d71e5eea14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -134,14 +134,20 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
}
if (!*done) {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "VF <-- PF Timeout [Type %d]\n",
- p_req->first_tlv.tl.type);
+ DP_NOTICE(p_hwfn,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
rc = -EBUSY;
} else {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "PF response: %d [Type %d]\n",
- *done, p_req->first_tlv.tl.type);
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
}
return rc;
@@ -228,7 +234,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
/* copy acquire response from buffer to p_hwfn */
memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
@@ -412,6 +418,155 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+static void
+__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_clss mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= BIT(mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= BIT(mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+static void
+qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_clss mask,
+ u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode,
+ u8 tunn_cls, enum qed_tunn_mode val)
+{
+ if (feature_mask & BIT(val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ QED_MODE_VXLAN_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ QED_MODE_L2GENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ QED_MODE_IPGENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ QED_MODE_L2GRE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ QED_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
+}
+
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ int rc;
+
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ QED_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = -EINVAL;
+ }
+
+ qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
@@ -1245,6 +1400,18 @@ static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
return true;
}
+static void
+qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port)
+{
+ struct qed_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
@@ -1264,12 +1431,16 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
void *cookie = hwfn->cdev->ops_cookie;
+ u16 vxlan_port, geneve_port;
+ qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
if (is_mac_exist && cookie)
ops->force_mac(cookie, mac, !!is_mac_forced);
+ ops->ports_update(cookie, vxlan_port, geneve_port);
+
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 7da0b165d8bc..34ac70b0e5fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -275,6 +275,8 @@ struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
@@ -285,6 +287,8 @@ struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
u8 num_txqs;
u8 padding[5];
};
@@ -425,6 +429,43 @@ struct vfpf_ucast_filter_tlv {
u16 padding[3];
};
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
@@ -440,6 +481,7 @@ union vfpf_tlvs {
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -449,6 +491,7 @@ union pfvf_tlvs {
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
enum qed_bulletin_bit {
@@ -509,7 +552,9 @@ struct qed_bulletin_content {
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
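Because the bulletin is a layout-sensitive structure shared between PF and VF, the two new u16 ports consume the old padding rather than growing the struct. Below is a trimmed, illustrative sketch (not the real structs, which carry many more members) of how that invariant could be checked at compile time.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed stand-ins for the bulletin tail, for illustration only */
struct bulletin_old {
	uint8_t padding4[6];
	uint32_t speed;
};

struct bulletin_new {
	uint16_t vxlan_udp_port;
	uint16_t geneve_udp_port;
	uint8_t padding4[2];
	uint32_t speed;
};

/* The two u16 ports plus 2 pad bytes replace the old 6-byte pad, so the
 * shared PF<->VF layout keeps its size and later-member offsets.
 */
static_assert(sizeof(struct bulletin_old) == sizeof(struct bulletin_new),
	      "bulletin size must not change");
static_assert(offsetof(struct bulletin_old, speed) ==
	      offsetof(struct bulletin_new, speed),
	      "offset of 'speed' must not change");

int main(void) { return 0; }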
@@ -551,6 +596,7 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -868,6 +914,9 @@ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_bulletin_content *p_bulletin);
void qed_iov_vf_task(struct work_struct *work);
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -1029,6 +1078,17 @@ __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
static inline void qed_iov_vf_task(struct work_struct *work)
{
}
+
+static inline void
+qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+}
+
+static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f2aaef2cfb86..9b4f08b6f9b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -41,6 +41,9 @@
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/io.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
@@ -50,7 +53,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 20
+#define QEDE_ENGINEERING_VERSION 21
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -58,7 +61,7 @@
#define DRV_MODULE_SYM qede
-struct qede_stats {
+struct qede_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -90,11 +93,6 @@ struct qede_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -111,17 +109,39 @@ struct qede_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
- u64 tx_pause_frames;
- u64 tx_pfc_frames;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
- u64 brb_truncates;
- u64 brb_discards;
- u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+ struct qede_stats_common common;
+
+ union {
+ struct qede_stats_bb bb;
+ struct qede_stats_ah ah;
+ };
};
struct qede_vlan {
@@ -147,10 +167,11 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
- u32 flags;
-#define QEDE_FLAG_IS_VF BIT(0)
+ unsigned long flags;
+#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
#define QEDE_TX_TIMESTAMPING_EN BIT(1)
+#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2)
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
@@ -158,6 +179,10 @@ struct qede_dev {
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
u8 req_num_tx;
@@ -216,7 +241,10 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
- bool wol_enabled;
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_arfs *arfs;
+#endif
+ bool wol_enabled;
struct qede_rdma_dev rdma_info;
@@ -292,21 +320,24 @@ struct qede_rx_queue {
u8 data_direction;
u8 rxq_id;
+ /* Used once per NAPI run */
+ u16 num_rx_buffers;
+
+ u16 rx_headroom;
+
u32 rx_buf_size;
u32 rx_buf_seg_size;
- u64 rcv_pkts;
-
struct sw_rx_data *sw_rx_ring;
struct qed_chain rx_bd_ring;
struct qed_chain rx_comp_ring ____cacheline_aligned;
- /* Used once per each NAPI run */
- u16 num_rx_buffers;
-
/* GRO */
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ /* Used once per NAPI run */
+ u64 rcv_pkts;
+
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
@@ -328,6 +359,11 @@ struct sw_tx_bd {
#define QEDE_TSO_SPLIT_BD BIT(0)
};
+struct sw_tx_xdp {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
struct qede_tx_queue {
u8 is_xdp;
bool is_legacy;
@@ -351,11 +387,11 @@ struct qede_tx_queue {
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
/* Regular Tx requires skb + metadata for release purpose,
- * while XDP requires only the pages themselves.
+ * while XDP requires the pages and the mapped address.
*/
union {
struct sw_tx_bd *skbs;
- struct page **pages;
+ struct sw_tx_xdp *xdp;
} sw_tx_ring;
struct qed_chain tx_pbl;
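A trimmed illustration of the bookkeeping change above: regular Tx keeps the skb for release, while XDP Tx must remember both the page and its DMA mapping, since its completion path unmaps the buffer itself. Types are reduced stand-ins, not the driver's definitions.

#include <stdint.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */
struct page;			/* opaque for the sketch */
struct sk_buff;

struct sw_tx_bd  { struct sk_buff *skb; uint8_t flags; };
struct sw_tx_xdp { struct page *page; dma_addr_t mapping; };

union sw_tx_ring {
	struct sw_tx_bd *skbs;	/* regular Tx: skb carries the metadata */
	struct sw_tx_xdp *xdp;	/* XDP Tx: page + mapping for unmap/free */
};

int main(void) { return 0; }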
@@ -407,8 +443,20 @@ struct qede_fastpath {
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
#define QEDE_SP_RX_MODE 1
-#define QEDE_SP_VXLAN_PORT_CONFIG 2
-#define QEDE_SP_GENEVE_PORT_CONFIG 3
+
+#ifdef CONFIG_RFS_ACCEL
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
+void qede_free_arfs(struct qede_dev *edev);
+int qede_alloc_arfs(struct qede_dev *edev);
+
+#define QEDE_SP_ARFS_CONFIG 4
+#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
+#define QEDE_RFS_MAX_FLTR 256
+#endif
struct qede_reload_args {
void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
@@ -433,6 +481,7 @@ irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index 03e8c0212433..a9e7379313db 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -281,6 +281,11 @@ static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
struct dcb_app *app)
{
struct qede_dev *edev = netdev_priv(netdev);
+ int err;
+
+ err = dcb_ieee_setapp(netdev, app);
+ if (err)
+ return err;
return edev->ops->dcb->ieee_setapp(edev->cdev, app);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 897953133245..4dcfe9614731 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -75,16 +75,33 @@ static const struct {
QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
- {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+ (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+ {QEDE_STAT_OFFSET(stat_name, type, base), \
+ QEDE_STAT_STRING(stat_name), \
+ attr}
+#define QEDE_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+ BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_bb, \
+ offsetof(struct qede_stats, bb), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_ah, \
+ offsetof(struct qede_stats, ah), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
- bool pf_only;
+ unsigned long attr;
+#define QEDE_STAT_PF_ONLY 0
+#define QEDE_STAT_BB_ONLY 1
+#define QEDE_STAT_AH_ONLY 2
} qede_stats_arr[] = {
QEDE_STAT(rx_ucast_bytes),
QEDE_STAT(rx_mcast_bytes),
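A worked standalone example of how the two-level offset in QEDE_PF_BB_STAT resolves: the stat's offset inside its sub-struct is added to the sub-struct's offset inside the aggregate, yielding a single byte offset into the whole stats blob. The structs are trimmed stand-ins for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats_common { uint64_t no_buff_discards; };
struct stats_bb     { uint64_t tx_lpi_entry_count; };
struct stats {
	struct stats_common common;
	struct stats_bb bb;
};

int main(void)
{
	/* offsetof(sub-struct member) + offsetof(sub-struct in aggregate) */
	size_t off = offsetof(struct stats_bb, tx_lpi_entry_count) +
		     offsetof(struct stats, bb);
	struct stats s = { .bb = { .tx_lpi_entry_count = 42 } };
	uint64_t v = *(uint64_t *)((char *)&s + off);

	printf("offset %zu -> value %llu\n", off, (unsigned long long)v);
	return 0;
}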
@@ -106,22 +123,23 @@ static const struct {
QEDE_PF_STAT(rx_256_to_511_byte_packets),
QEDE_PF_STAT(rx_512_to_1023_byte_packets),
QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
- QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
QEDE_PF_STAT(tx_256_to_511_byte_packets),
QEDE_PF_STAT(tx_512_to_1023_byte_packets),
QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
-
+ QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
QEDE_PF_STAT(rx_mac_crtl_frames),
QEDE_PF_STAT(tx_mac_ctrl_frames),
QEDE_PF_STAT(rx_pause_frames),
@@ -136,8 +154,8 @@ static const struct {
QEDE_PF_STAT(rx_jabbers),
QEDE_PF_STAT(rx_undersize_packets),
QEDE_PF_STAT(rx_fragments),
- QEDE_PF_STAT(tx_lpi_entry_count),
- QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_BB_STAT(tx_lpi_entry_count),
+ QEDE_PF_BB_STAT(tx_total_collisions),
QEDE_PF_STAT(brb_truncates),
QEDE_PF_STAT(brb_discards),
QEDE_STAT(no_buff_discards),
@@ -155,6 +173,12 @@ static const struct {
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+ test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+ test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+ test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
enum {
QEDE_PRI_FLAG_CMT,
@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
}
}
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+ return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+ (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+ (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
struct qede_fastpath *fp;
@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
/* Account for non-queue statistics */
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
strcpy(buf, qede_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
*buf = *((u64 *)(((void *)&edev->stats) +
qede_stats_arr[i].offset));
@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
struct qede_dev *edev = netdev_priv(dev);
- int num_stats = QEDE_NUM_STATS;
+ int num_stats = QEDE_NUM_STATS, i;
switch (stringset) {
case ETH_SS_STATS:
- if (IS_VF(edev)) {
- int i;
-
- for (i = 0; i < QEDE_NUM_STATS; i++)
- if (qede_stats_arr[i].pf_only)
- num_stats--;
- }
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_is_irrelevant_stat(edev, i))
+ num_stats--;
/* Account for the Regular Tx statistics */
num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 107c3fda4792..eb5652073ca8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -38,6 +38,459 @@
#include <linux/qed/qed_if.h>
#include "qede.h"
+#ifdef CONFIG_RFS_ACCEL
+struct qede_arfs_tuple {
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 eth_proto;
+ u8 ip_proto;
+};
+
+struct qede_arfs_fltr_node {
+#define QEDE_FLTR_VALID 0
+ unsigned long state;
+
+ /* pointer to aRFS packet buffer */
+ void *data;
+
+ /* dma map address of aRFS packet buffer */
+ dma_addr_t mapping;
+
+ /* length of aRFS packet buffer */
+ int buf_len;
+
+ /* tuples to hold from aRFS packet buffer */
+ struct qede_arfs_tuple tuple;
+
+ u32 flow_id;
+ u16 sw_id;
+ u16 rxq_id;
+ u16 next_rxq_id;
+ bool filter_op;
+ bool used;
+ struct hlist_node node;
+};
+
+struct qede_arfs {
+#define QEDE_ARFS_POLL_COUNT 100
+#define QEDE_RFS_FLW_BITSHIFT (4)
+#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
+ struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
+
+ /* lock for filter list access */
+ spinlock_t arfs_list_lock;
+ unsigned long *arfs_fltr_bmap;
+ int filter_count;
+ bool enable;
+};
+
+static void qede_configure_arfs_fltr(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *n,
+ u16 rxq_id, bool add_fltr)
+{
+ const struct qed_eth_ops *op = edev->ops;
+
+ if (n->used)
+ return;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+ "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ add_fltr ? "Adding" : "Deleting",
+ n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
+ ntohs(n->tuple.dst_port), rxq_id);
+
+ n->used = true;
+ n->filter_op = add_fltr;
+ op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
+ rxq_id, add_fltr);
+}
+
+static void
+qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
+{
+ kfree(fltr->data);
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+ kfree(fltr);
+}
+
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
+{
+ struct qede_arfs_fltr_node *fltr = filter;
+ struct qede_dev *edev = dev;
+
+ if (fw_rc) {
+ DP_NOTICE(edev,
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ fw_rc, fltr->flow_id, fltr->sw_id,
+ ntohs(fltr->tuple.src_port),
+ ntohs(fltr->tuple.dst_port), fltr->rxq_id);
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return;
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+
+ if (fltr->filter_op) {
+ set_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id)
+ qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
+ false);
+ } else {
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id) {
+ fltr->rxq_id = fltr->next_rxq_id;
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id, true);
+ }
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* Should be called while qede_lock is held */
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
+{
+ int i;
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
+ struct hlist_node *temp;
+ struct hlist_head *head;
+ struct qede_arfs_fltr_node *fltr;
+
+ head = &edev->arfs->arfs_hl_head[i];
+
+ hlist_for_each_entry_safe(fltr, temp, head, node) {
+ bool del = false;
+
+ if (edev->state != QEDE_STATE_OPEN)
+ del = true;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
+ !fltr->used) || free_fltr) {
+ hlist_del(&fltr->node);
+ dma_unmap_single(&edev->pdev->dev,
+ fltr->mapping,
+ fltr->buf_len, DMA_TO_DEVICE);
+ qede_free_arfs_filter(edev, fltr);
+ edev->arfs->filter_count--;
+ } else {
+ if ((rps_may_expire_flow(edev->ndev,
+ fltr->rxq_id,
+ fltr->flow_id,
+ fltr->sw_id) || del) &&
+ !free_fltr)
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id,
+ false);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ }
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if (!edev->arfs->filter_count) {
+ if (edev->arfs->enable) {
+ edev->arfs->enable = false;
+ edev->ops->configure_arfs_searcher(edev->cdev, false);
+ }
+ } else {
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task,
+ QEDE_SP_TASK_POLL_DELAY);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* This function waits until all aRFS filters get deleted and freed.
+ * On timeout it frees all filters forcefully.
+ */
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
+{
+ int count = QEDE_ARFS_POLL_COUNT;
+
+ while (count) {
+ qede_process_arfs_filters(edev, false);
+
+ if (!edev->arfs->filter_count)
+ break;
+
+ msleep(100);
+ count--;
+ }
+
+ if (!count) {
+ DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
+
+ /* Something is terribly wrong, free forcefully */
+ qede_process_arfs_filters(edev, true);
+ }
+}
+
+int qede_alloc_arfs(struct qede_dev *edev)
+{
+ int i;
+
+ edev->arfs = vzalloc(sizeof(*edev->arfs));
+ if (!edev->arfs)
+ return -ENOMEM;
+
+ spin_lock_init(&edev->arfs->arfs_list_lock);
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
+ INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);
+
+ edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
+ if (!edev->ndev->rx_cpu_rmap) {
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+ edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
+ sizeof(long));
+ if (!edev->arfs->arfs_fltr_bmap) {
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+ edev->ndev->rx_cpu_rmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qede_free_arfs(struct qede_dev *edev)
+{
+ if (!edev->arfs)
+ return;
+
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+ vfree(edev->arfs->arfs_fltr_bmap);
+ edev->arfs->arfs_fltr_bmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+}
+
+static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
+ tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ else
+ return false;
+ } else {
+ struct in6_addr *src = &tpos->tuple.src_ipv6;
+ u8 size = sizeof(struct in6_addr);
+
+ if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
+ !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
+ return true;
+ else
+ return false;
+ }
+}
+
+static struct qede_arfs_fltr_node *
+qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
+ __be16 src_port, __be16 dst_port, u8 ip_proto)
+{
+ struct qede_arfs_fltr_node *tpos;
+
+ hlist_for_each_entry(tpos, h, node)
+ if (tpos->tuple.ip_proto == ip_proto &&
+ tpos->tuple.eth_proto == skb->protocol &&
+ qede_compare_ip_addr(tpos, skb) &&
+ tpos->tuple.src_port == src_port &&
+ tpos->tuple.dst_port == dst_port)
+ return tpos;
+
+ return NULL;
+}
+
+static struct qede_arfs_fltr_node *
+qede_alloc_filter(struct qede_dev *edev, int min_hlen)
+{
+ struct qede_arfs_fltr_node *n;
+ int bit_id;
+
+ bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
+ QEDE_RFS_MAX_FLTR);
+
+ if (bit_id >= QEDE_RFS_MAX_FLTR)
+ return NULL;
+
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+ if (!n)
+ return NULL;
+
+ n->data = kzalloc(min_hlen, GFP_ATOMIC);
+ if (!n->data) {
+ kfree(n);
+ return NULL;
+ }
+
+ n->sw_id = (u16)bit_id;
+ set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
+ return n;
+}
+
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc, tp_offset;
+ struct ethhdr *eth;
+ __be16 *ports;
+ u16 tbl_idx;
+ u8 ip_proto;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_proto = ip_hdr(skb)->protocol;
+ tp_offset = sizeof(struct iphdr);
+ } else {
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ tp_offset = sizeof(struct ipv6hdr);
+ }
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ ports = (__be16 *)(skb->data + tp_offset);
+ tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
+ skb, ports[0], ports[1], ip_proto);
+
+ if (n) {
+ /* Filter match */
+ n->next_rxq_id = rxq_index;
+
+ if (test_bit(QEDE_FLTR_VALID, &n->state)) {
+ if (n->rxq_id != rxq_index)
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ false);
+ } else {
+ if (!n->used) {
+ n->rxq_id = rxq_index;
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ true);
+ }
+ }
+
+ rc = n->sw_id;
+ goto ret_unlock;
+ }
+
+ min_hlen = ETH_HLEN + skb_headlen(skb);
+
+ n = qede_alloc_filter(edev, min_hlen);
+ if (!n) {
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ n->buf_len = min_hlen;
+ n->rxq_id = rxq_index;
+ n->next_rxq_id = rxq_index;
+ n->tuple.src_port = ports[0];
+ n->tuple.dst_port = ports[1];
+ n->flow_id = flow_id;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
+ n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+
+ eth = (struct ethhdr *)n->data;
+ eth->h_proto = skb->protocol;
+ n->tuple.eth_proto = skb->protocol;
+ n->tuple.ip_proto = ip_proto;
+ memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
+
+ n->mapping = dma_map_single(&edev->pdev->dev, n->data,
+ n->buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
+ DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
+ qede_free_arfs_filter(edev, n);
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ INIT_HLIST_NODE(&n->node);
+ hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
+ edev->arfs->filter_count++;
+
+ if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
+ edev->ops->configure_arfs_searcher(edev->cdev, true);
+ edev->arfs->enable = true;
+ }
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ return n->sw_id;
+
+ret_unlock:
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return rc;
+}
+#endif
+
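For reference, a standalone sketch of the bucket selection qede_rx_flow_steer() performs above: the low QEDE_RFS_FLW_BITSHIFT bits of the raw skb hash pick one of 16 hlist buckets. The constants mirror the driver; the hash inputs are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define RFS_FLW_BITSHIFT 4
#define RFS_FLW_MASK ((1 << RFS_FLW_BITSHIFT) - 1)

static unsigned int arfs_bucket(uint32_t skb_hash)
{
	return skb_hash & RFS_FLW_MASK;	/* bucket index 0..15 */
}

int main(void)
{
	printf("%u %u\n", arfs_bucket(0x12345678), arfs_bucket(0xdeadbeef));
	return 0;
}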
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->vxlan_dst_port != vxlan_port)
+ edev->vxlan_dst_port = 0;
+
+ if (edev->geneve_dst_port != geneve_port)
+ edev->geneve_dst_port = 0;
+}
+
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
@@ -441,69 +894,112 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
u16 t_port = ntohs(ti->port);
+ int rc;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
+ if (!edev->dev_info.common.vxlan_enable)
+ return;
+
if (edev->vxlan_dst_port)
return;
- edev->vxlan_dst_port = t_port;
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
- t_port);
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
+ if (!rc) {
+ edev->vxlan_dst_port = t_port;
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+ t_port);
+ } else {
+ DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
+ t_port);
+ }
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
break;
case UDP_TUNNEL_TYPE_GENEVE:
+ if (!edev->dev_info.common.geneve_enable)
+ return;
+
if (edev->geneve_dst_port)
return;
- edev->geneve_dst_port = t_port;
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = t_port;
+
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
+ if (!rc) {
+ edev->geneve_dst_port = t_port;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Added geneve port=%d\n", t_port);
+ } else {
+ DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
+ t_port);
+ }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
- t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
default:
return;
}
-
- schedule_delayed_work(&edev->sp_task, 0);
}
-void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
u16 t_port = ntohs(ti->port);
+ memset(&tunn_params, 0, sizeof(tunn_params));
+
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
if (t_port != edev->vxlan_dst_port)
return;
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = 0;
+
+ __qede_lock(edev);
+ edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
edev->vxlan_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
break;
case UDP_TUNNEL_TYPE_GENEVE:
if (t_port != edev->geneve_dst_port)
return;
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = 0;
+
+ __qede_lock(edev);
+ edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
edev->geneve_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
default:
return;
}
-
- schedule_delayed_work(&edev->sp_task, 0);
}
static void qede_xdp_reload_func(struct qede_dev *edev,
@@ -520,11 +1016,6 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
struct qede_reload_args args;
- if (prog && prog->xdp_adjust_head) {
- DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
-
/* If we're called, there was already a bpf reference increment */
args.func = &qede_xdp_reload_func;
args.u.new_prog = prog;
@@ -537,6 +1028,11 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
struct qede_dev *edev = netdev_priv(dev);
+ if (IS_VF(edev)) {
+ DP_NOTICE(edev, "VFs don't support XDP\n");
+ return -EOPNOTSUPP;
+ }
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
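Two behavioural changes meet here: programs that call bpf_xdp_adjust_head() are no longer rejected, since the driver now reserves XDP_PACKET_HEADROOM per buffer (see the qede_fp.c hunks below), and XDP attach is refused on VFs. A sketch of a program exercising the newly allowed helper, written against current libbpf header names (an assumption; contemporary samples used slightly different includes):

    /* Grow the packet 4 bytes into the headroom, then pass it on. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_push4(struct xdp_md *ctx)
    {
        /* A negative delta moves xdp->data toward data_hard_start. */
        if (bpf_xdp_adjust_head(ctx, -4))
            return XDP_DROP;        /* not enough headroom */
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";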
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1e65038c8fc0..7b6f41d06245 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
WARN_ON(!rx_bd);
rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+ rxq->rx_headroom);
rxq->sw_rx_prod++;
rxq->filled_buffers++;
@@ -360,7 +361,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
metadata->mapping + padding,
length, PCI_DMA_TODEVICE);
- txq->sw_tx_ring.pages[idx] = metadata->data;
+ txq->sw_tx_ring.xdp[idx].page = metadata->data;
+ txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
txq->sw_tx_prod++;
/* Mark the fastpath for future XDP doorbell */
@@ -384,19 +386,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- struct eth_tx_1st_bd *bd;
- u16 hw_bd_cons;
+ u16 hw_bd_cons, idx;
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ qed_chain_consume(&txq->tx_pbl);
+ idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
- dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
- NUM_TX_BDS_MAX]);
+ dma_unmap_page(&edev->pdev->dev,
+ txq->sw_tx_ring.xdp[idx].mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(txq->sw_tx_ring.xdp[idx].page);
txq->sw_tx_cons++;
txq->xmit_pkts++;
@@ -508,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq,
new_mapping = curr_prod->mapping + curr_prod->page_offset;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+ rxq->rx_headroom);
rxq->sw_rx_prod++;
curr_cons->data = NULL;
@@ -624,7 +627,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
- rxq->rcv_pkts++;
}
static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,9 +886,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
"Strange - TPA cont with more than a single len_list entry\n");
}
-static void qede_tpa_end(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct eth_fast_path_rx_tpa_end_cqe *cqe)
+static int qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
struct qede_rx_queue *rxq = fp->rxq;
struct qede_agg_info *tpa_info;
@@ -934,11 +936,12 @@ static void qede_tpa_end(struct qede_dev *edev,
tpa_info->state = QEDE_AGG_STATE_NONE;
- return;
+ return 1;
err:
tpa_info->state = QEDE_AGG_STATE_NONE;
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
+ return 0;
}
static u8 qede_check_notunn_csum(u16 flag)
@@ -990,14 +993,15 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct bpf_prog *prog,
struct sw_rx_data *bd,
- struct eth_fast_path_rx_reg_cqe *cqe)
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 *data_offset, u16 *len)
{
- u16 len = le16_to_cpu(cqe->len_on_first_bd);
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data = page_address(bd->data) + cqe->placement_offset;
- xdp.data_end = xdp.data + len;
+ xdp.data_hard_start = page_address(bd->data);
+ xdp.data = xdp.data_hard_start + *data_offset;
+ xdp.data_end = xdp.data + *len;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
@@ -1007,6 +1011,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
act = bpf_prog_run_xdp(prog, &xdp);
rcu_read_unlock();
+ /* Recalculate, as XDP might have changed the headers */
+ *data_offset = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+
if (act == XDP_PASS)
return true;
@@ -1025,7 +1033,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
/* Now if there's a transmission problem, we'd still have to
* drop the current buffer, as a replacement was already allocated.
*/
- if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
dma_unmap_page(rxq->dev, bd->mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(bd->data);
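The xdp_buff is now built with data_hard_start at the page start so the program has headroom to grow into, and the offset and length are read back after the program runs rather than taken from the CQE. The arithmetic, modelled as a self-contained userspace program with the kernel's struct reduced to three pointers:

    #include <assert.h>
    #include <stdint.h>

    struct xdp_buff { void *data_hard_start, *data, *data_end; };

    static void recalc(const struct xdp_buff *x, uint16_t *off, uint16_t *len)
    {
        *off = (uint16_t)((char *)x->data - (char *)x->data_hard_start);
        *len = (uint16_t)((char *)x->data_end - (char *)x->data);
    }

    int main(void)
    {
        char page[4096];
        struct xdp_buff x = { page, page + 256, page + 256 + 100 };
        uint16_t off, len;

        x.data = (char *)x.data + 14;   /* program stripped a 14-byte header */
        recalc(&x, &off, &len);
        assert(off == 270 && len == 86);
        return 0;
    }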
@@ -1052,7 +1060,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
struct sw_rx_data *bd, u16 len,
u16 pad)
{
- unsigned int offset = bd->page_offset;
+ unsigned int offset = bd->page_offset + pad;
struct skb_frag_struct *frag;
struct page *page = bd->data;
unsigned int pull_len;
@@ -1069,7 +1077,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
*/
if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
- page_address(page) + pad + offset, len);
+ page_address(page) + offset, len);
qede_reuse_page(rxq, bd);
goto out;
}
@@ -1077,7 +1085,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
frag = &skb_shinfo(skb)->frags[0];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, pad + offset, len, rxq->rx_buf_seg_size);
+ page, offset, len, rxq->rx_buf_seg_size);
va = skb_frag_address(frag);
pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
@@ -1178,8 +1186,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
return 0;
case ETH_RX_CQE_TYPE_TPA_END:
- qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
- return 1;
+ return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
default:
return 0;
}
@@ -1224,12 +1231,13 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
fp_cqe = &cqe->fast_path_regular;
len = le16_to_cpu(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
+ pad = fp_cqe->placement_offset + rxq->rx_headroom;
/* Run eBPF program if one is attached */
if (xdp_prog)
- if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
- return 1;
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
+ &pad, &len))
+ return 0;
/* If this is an error packet then drop it */
flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1298,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_rx_queue *rxq = fp->rxq;
struct qede_dev *edev = fp->edev;
+ int work_done = 0, rcv_pkts = 0;
u16 hw_comp_cons, sw_comp_cons;
- int work_done = 0;
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1313,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
/* Loop to complete all indicated BDs */
while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
- qede_rx_process_cqe(edev, fp, rxq);
+ rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
work_done++;
}
+ rxq->rcv_pkts += rcv_pkts;
+
/* Allocate replacement buffers */
while (rxq->num_rx_buffers - rxq->filled_buffers)
if (qede_alloc_rx_buffer(rxq, false))
@@ -1687,13 +1697,24 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
}
/* Disable offloads for geneve tunnels, as HW can't parse
- * the geneve header which has option length greater than 32B.
+ * the geneve header whose option length is greater than 32 bytes,
+ * and disable offloads for UDP ports that have not been offloaded.
*/
- if ((l4_proto == IPPROTO_UDP) &&
- ((skb_inner_mac_header(skb) -
- skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
- return features & ~(NETIF_F_CSUM_MASK |
- NETIF_F_GSO_MASK);
+ if (l4_proto == IPPROTO_UDP) {
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 hdrlen, vxln_port, gnv_port;
+
+ hdrlen = QEDE_MAX_TUN_HDR_LEN;
+ vxln_port = edev->vxlan_dst_port;
+ gnv_port = edev->geneve_dst_port;
+
+ if ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > hdrlen ||
+ (ntohs(udp_hdr(skb)->dest) != vxln_port &&
+ ntohs(udp_hdr(skb)->dest) != gnv_port))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ }
}
return features;
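The rewritten check keeps checksum and GSO offloads only for UDP tunnels the hardware has actually been told about: the inner headers must fit within QEDE_MAX_TUN_HDR_LEN and the destination port must match one of the two cached offloaded ports. The predicate in isolation, as a hypothetical helper (everything beyond the hunk's own names is illustrative):

    static netdev_features_t
    qede_strip_tun_offloads(struct sk_buff *skb, netdev_features_t features,
                            u16 vxlan_port, u16 geneve_port)
    {
        u16 dst = ntohs(udp_hdr(skb)->dest);

        if (skb_inner_mac_header(skb) - skb_transport_header(skb) >
            QEDE_MAX_TUN_HDR_LEN ||
            (dst != vxlan_port && dst != geneve_port))
            features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        return features;
    }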
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 3a78c3f25157..b9ba23d71c61 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_AH 0x8070
+#define CHIP_NUM_AH_IOV 0x8090
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
+
#endif
enum qede_pci_private {
@@ -110,6 +115,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -216,9 +225,13 @@ static struct pci_driver qede_pci_driver = {
static struct qed_eth_cb_ops qede_ll_ops = {
{
+#ifdef CONFIG_RFS_ACCEL
+ .arfs_filter_op = qede_arfs_filter_op,
+#endif
.link_update = qede_link_update,
},
.force_mac = qede_force_mac,
+ .ports_update = qede_udp_ports_update,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -314,122 +327,135 @@ static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
+ struct qede_stats_common *p_common = &edev->stats.common;
struct qed_eth_stats stats;
edev->ops->get_vport_stats(edev->cdev, &stats);
- edev->stats.no_buff_discards = stats.no_buff_discards;
- edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
- edev->stats.ttl0_discard = stats.ttl0_discard;
- edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
- edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
- edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
- edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
- edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
- edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
- edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
- edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
- edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
- edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
- edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
- edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
- edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
- edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
- edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
- edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
- edev->stats.coalesced_events = stats.tpa_coalesced_events;
- edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
- edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
- edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
- edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
- edev->stats.rx_128_to_255_byte_packets =
- stats.rx_128_to_255_byte_packets;
- edev->stats.rx_256_to_511_byte_packets =
- stats.rx_256_to_511_byte_packets;
- edev->stats.rx_512_to_1023_byte_packets =
- stats.rx_512_to_1023_byte_packets;
- edev->stats.rx_1024_to_1518_byte_packets =
- stats.rx_1024_to_1518_byte_packets;
- edev->stats.rx_1519_to_1522_byte_packets =
- stats.rx_1519_to_1522_byte_packets;
- edev->stats.rx_1519_to_2047_byte_packets =
- stats.rx_1519_to_2047_byte_packets;
- edev->stats.rx_2048_to_4095_byte_packets =
- stats.rx_2048_to_4095_byte_packets;
- edev->stats.rx_4096_to_9216_byte_packets =
- stats.rx_4096_to_9216_byte_packets;
- edev->stats.rx_9217_to_16383_byte_packets =
- stats.rx_9217_to_16383_byte_packets;
- edev->stats.rx_crc_errors = stats.rx_crc_errors;
- edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
- edev->stats.rx_pause_frames = stats.rx_pause_frames;
- edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
- edev->stats.rx_align_errors = stats.rx_align_errors;
- edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
- edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
- edev->stats.rx_jabbers = stats.rx_jabbers;
- edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
- edev->stats.rx_fragments = stats.rx_fragments;
- edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
- edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
- edev->stats.tx_128_to_255_byte_packets =
- stats.tx_128_to_255_byte_packets;
- edev->stats.tx_256_to_511_byte_packets =
- stats.tx_256_to_511_byte_packets;
- edev->stats.tx_512_to_1023_byte_packets =
- stats.tx_512_to_1023_byte_packets;
- edev->stats.tx_1024_to_1518_byte_packets =
- stats.tx_1024_to_1518_byte_packets;
- edev->stats.tx_1519_to_2047_byte_packets =
- stats.tx_1519_to_2047_byte_packets;
- edev->stats.tx_2048_to_4095_byte_packets =
- stats.tx_2048_to_4095_byte_packets;
- edev->stats.tx_4096_to_9216_byte_packets =
- stats.tx_4096_to_9216_byte_packets;
- edev->stats.tx_9217_to_16383_byte_packets =
- stats.tx_9217_to_16383_byte_packets;
- edev->stats.tx_pause_frames = stats.tx_pause_frames;
- edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
- edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
- edev->stats.tx_total_collisions = stats.tx_total_collisions;
- edev->stats.brb_truncates = stats.brb_truncates;
- edev->stats.brb_discards = stats.brb_discards;
- edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+
+ p_common->no_buff_discards = stats.common.no_buff_discards;
+ p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ p_common->ttl0_discard = stats.common.ttl0_discard;
+ p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+ p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+ p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+ p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+ p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+ p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+ p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+ p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+ p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+ p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+ p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+ p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+ p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+ p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+ p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+ p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+ p_common->coalesced_events = stats.common.tpa_coalesced_events;
+ p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+ p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+ p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+ p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+ p_common->rx_65_to_127_byte_packets =
+ stats.common.rx_65_to_127_byte_packets;
+ p_common->rx_128_to_255_byte_packets =
+ stats.common.rx_128_to_255_byte_packets;
+ p_common->rx_256_to_511_byte_packets =
+ stats.common.rx_256_to_511_byte_packets;
+ p_common->rx_512_to_1023_byte_packets =
+ stats.common.rx_512_to_1023_byte_packets;
+ p_common->rx_1024_to_1518_byte_packets =
+ stats.common.rx_1024_to_1518_byte_packets;
+ p_common->rx_crc_errors = stats.common.rx_crc_errors;
+ p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+ p_common->rx_pause_frames = stats.common.rx_pause_frames;
+ p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+ p_common->rx_align_errors = stats.common.rx_align_errors;
+ p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+ p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+ p_common->rx_jabbers = stats.common.rx_jabbers;
+ p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+ p_common->rx_fragments = stats.common.rx_fragments;
+ p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+ p_common->tx_65_to_127_byte_packets =
+ stats.common.tx_65_to_127_byte_packets;
+ p_common->tx_128_to_255_byte_packets =
+ stats.common.tx_128_to_255_byte_packets;
+ p_common->tx_256_to_511_byte_packets =
+ stats.common.tx_256_to_511_byte_packets;
+ p_common->tx_512_to_1023_byte_packets =
+ stats.common.tx_512_to_1023_byte_packets;
+ p_common->tx_1024_to_1518_byte_packets =
+ stats.common.tx_1024_to_1518_byte_packets;
+ p_common->tx_pause_frames = stats.common.tx_pause_frames;
+ p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+ p_common->brb_truncates = stats.common.brb_truncates;
+ p_common->brb_discards = stats.common.brb_discards;
+ p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+ if (QEDE_IS_BB(edev)) {
+ struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+ p_bb->rx_1519_to_1522_byte_packets =
+ stats.bb.rx_1519_to_1522_byte_packets;
+ p_bb->rx_1519_to_2047_byte_packets =
+ stats.bb.rx_1519_to_2047_byte_packets;
+ p_bb->rx_2048_to_4095_byte_packets =
+ stats.bb.rx_2048_to_4095_byte_packets;
+ p_bb->rx_4096_to_9216_byte_packets =
+ stats.bb.rx_4096_to_9216_byte_packets;
+ p_bb->rx_9217_to_16383_byte_packets =
+ stats.bb.rx_9217_to_16383_byte_packets;
+ p_bb->tx_1519_to_2047_byte_packets =
+ stats.bb.tx_1519_to_2047_byte_packets;
+ p_bb->tx_2048_to_4095_byte_packets =
+ stats.bb.tx_2048_to_4095_byte_packets;
+ p_bb->tx_4096_to_9216_byte_packets =
+ stats.bb.tx_4096_to_9216_byte_packets;
+ p_bb->tx_9217_to_16383_byte_packets =
+ stats.bb.tx_9217_to_16383_byte_packets;
+ p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+ p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+ } else {
+ struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+ p_ah->rx_1519_to_max_byte_packets =
+ stats.ah.rx_1519_to_max_byte_packets;
+ p_ah->tx_1519_to_max_byte_packets =
+ stats.ah.tx_1519_to_max_byte_packets;
+ }
}
static void qede_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qede_stats_common *p_common;
qede_fill_by_demand_stats(edev);
+ p_common = &edev->stats.common;
- stats->rx_packets = edev->stats.rx_ucast_pkts +
- edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
- stats->tx_packets = edev->stats.tx_ucast_pkts +
- edev->stats.tx_mcast_pkts +
- edev->stats.tx_bcast_pkts;
-
- stats->rx_bytes = edev->stats.rx_ucast_bytes +
- edev->stats.rx_mcast_bytes +
- edev->stats.rx_bcast_bytes;
+ stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
- stats->tx_bytes = edev->stats.tx_ucast_bytes +
- edev->stats.tx_mcast_bytes +
- edev->stats.tx_bcast_bytes;
+ stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
- stats->tx_errors = edev->stats.tx_err_drop_pkts;
- stats->multicast = edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
+ stats->tx_errors = p_common->tx_err_drop_pkts;
+ stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
- stats->rx_fifo_errors = edev->stats.no_buff_discards;
+ stats->rx_fifo_errors = p_common->no_buff_discards;
- stats->collisions = edev->stats.tx_total_collisions;
- stats->rx_crc_errors = edev->stats.rx_crc_errors;
- stats->rx_frame_errors = edev->stats.rx_align_errors;
+ if (QEDE_IS_BB(edev))
+ stats->collisions = edev->stats.bb.tx_total_collisions;
+ stats->rx_crc_errors = p_common->rx_crc_errors;
+ stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
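The stats refill is split because the AH parts (device IDs 0x8070/0x8090, added to the PCI table below) expose a different MAC statistics block than the older BB parts: AH collapses the 1519-and-up byte buckets into single rx/tx_1519_to_max counters and lacks the LPI and collision counters. A sketch of the container shape this implies, assuming the union layout matches qede.h in this cycle (field lists abridged):

    struct qede_stats {
        struct qede_stats_common common;    /* shared by both families */
        union {                             /* chip-specific MAC block */
            struct qede_stats_bb bb;
            struct qede_stats_ah ah;
        };
    };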
@@ -532,6 +558,9 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
+#endif
};
/* -------------------------------------------------------------------------
@@ -581,7 +610,8 @@ static void qede_init_ndev(struct qede_dev *edev)
{
struct net_device *ndev = edev->ndev;
struct pci_dev *pdev = edev->pdev;
- u32 hw_features;
+ bool udp_tunnel_enable = false;
+ netdev_features_t hw_features;
pci_set_drvdata(pdev, ndev);
@@ -603,16 +633,33 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- /* Encap features*/
- hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_GRE_CSUM;
- ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_GRE_CSUM;
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ hw_features |= NETIF_F_NTUPLE;
+
+ if (edev->dev_info.common.vxlan_enable ||
+ edev->dev_info.common.geneve_enable)
+ udp_tunnel_enable = true;
+
+ if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
+ hw_features |= NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+ }
+
+ if (udp_tunnel_enable) {
+ hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ }
+
+ if (edev->dev_info.common.gre_enable) {
+ hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM);
+ }
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
@@ -750,7 +797,6 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
- struct qed_dev *cdev = edev->cdev;
__qede_lock(edev);
@@ -758,24 +804,12 @@ static void qede_sp_task(struct work_struct *work)
if (edev->state == QEDE_STATE_OPEN)
qede_config_rx_mode(edev->ndev);
- if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
- struct qed_tunn_params tunn_params;
-
- memset(&tunn_params, 0, sizeof(tunn_params));
- tunn_params.update_vxlan_port = 1;
- tunn_params.vxlan_port = edev->vxlan_dst_port;
- qed_ops->tunn_config(cdev, &tunn_params);
- }
-
- if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
- struct qed_tunn_params tunn_params;
-
- memset(&tunn_params, 0, sizeof(tunn_params));
- tunn_params.update_geneve_port = 1;
- tunn_params.geneve_port = edev->geneve_dst_port;
- qed_ops->tunn_config(cdev, &tunn_params);
+#ifdef CONFIG_RFS_ACCEL
+ if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
+ if (edev->state == QEDE_STATE_OPEN)
+ qede_process_arfs_filters(edev, false);
}
-
+#endif
__qede_unlock(edev);
}
@@ -786,6 +820,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+#ifdef CONFIG_RFS_ACCEL
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+#endif
qed_ops->common->update_pf_params(cdev, &pf_params);
}
@@ -870,13 +907,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
/* PTP not supported on VFs */
- if (!is_vf) {
- rc = qede_ptp_register_phc(edev);
- if (rc) {
- DP_NOTICE(edev, "Cannot register PHC\n");
- goto err5;
- }
- }
+ if (!is_vf)
+ qede_ptp_enable(edev, true);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -891,8 +923,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
-err5:
- unregister_netdev(edev->ndev);
err4:
qede_roce_dev_remove(edev);
err3:
@@ -940,11 +970,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
- cancel_delayed_work_sync(&edev->sp_task);
-
unregister_netdev(ndev);
+ cancel_delayed_work_sync(&edev->sp_task);
- qede_ptp_remove(edev);
+ qede_ptp_disable(edev);
qede_roce_dev_remove(edev);
@@ -1165,9 +1194,11 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->num_rx_buffers = edev->q_num_rx_buffers;
rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+ rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
- if (rxq->rx_buf_size > PAGE_SIZE)
- rxq->rx_buf_size = PAGE_SIZE;
+ /* Make sure that the headroom and payload fit in a single page */
+ if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
/* Segment size to split a page into multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
@@ -1229,7 +1260,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
if (txq->is_xdp)
- kfree(txq->sw_tx_ring.pages);
+ kfree(txq->sw_tx_ring.xdp);
else
kfree(txq->sw_tx_ring.skbs);
@@ -1247,9 +1278,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* Allocate the parallel driver ring for Tx buffers */
if (txq->is_xdp) {
- size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
- txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
- if (!txq->sw_tx_ring.pages)
+ size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+ txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.xdp)
goto err;
} else {
size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
@@ -1466,6 +1497,18 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
}
for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
+ rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
+ edev->int_info.msix[i].vector);
+ if (rc) {
+ DP_ERR(edev, "Failed to add CPU rmap\n");
+ qede_free_arfs(edev);
+ }
+ }
+#endif
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
@@ -1827,8 +1870,6 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
- qede_ptp_stop(edev);
-
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1847,7 +1888,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
-
+#ifdef CONFIG_RFS_ACCEL
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ qede_poll_for_freeing_arfs_filters(edev);
+ qede_free_arfs(edev);
+ }
+#endif
/* Release the interrupts */
qede_sync_free_irqs(edev);
edev->ops->common->set_fp_int(edev->cdev, 0);
@@ -1899,6 +1945,13 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
+#ifdef CONFIG_RFS_ACCEL
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ rc = qede_alloc_arfs(edev);
+ if (rc)
+ DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ }
+#endif
qede_napi_add_enable(edev);
DP_INFO(edev, "Napi added and enabled\n");
@@ -1925,13 +1978,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
qede_roce_dev_event_open(edev);
- qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
-
edev->state = QEDE_STATE_OPEN;
DP_INFO(edev, "Ending successfully qede load\n");
-
goto out;
err4:
qede_sync_free_irqs(edev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 2e62dec09bd7..24f06e2ef43e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -181,6 +181,7 @@ static void qede_ptp_task(struct work_struct *work)
skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
@@ -206,23 +207,10 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
return phc_cycles;
}
-static void qede_ptp_init_cc(struct qede_dev *edev)
-{
- struct qede_ptp *ptp;
-
- ptp = edev->ptp;
- if (!ptp)
- return;
-
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-}
-
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
+ enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
if (!ptp)
@@ -236,7 +224,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
switch (ptp->tx_type) {
case HWTSTAMP_TX_ON:
edev->flags |= QEDE_TX_TIMESTAMPING_EN;
- ptp->ops->hwtstamp_tx_on(edev->cdev);
+ tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ break;
+
+ case HWTSTAMP_TX_OFF:
+ edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
+ tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
@@ -247,42 +240,57 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
switch (ptp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ rx_filter = QED_PTP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ rx_filter = QED_PTP_FILTER_ALL;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
+ rx_filter = QED_PTP_FILTER_V1_L4_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
+ rx_filter = QED_PTP_FILTER_V2_L4_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
+ rx_filter = QED_PTP_FILTER_V2_L2_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- ptp->ops->cfg_rx_filters(edev->cdev,
- QED_PTP_FILTER_L2_IPV4_IPV6);
+ rx_filter = QED_PTP_FILTER_V2_GEN;
break;
}
+ ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
+
spin_unlock_bh(&ptp->lock);
return 0;
@@ -324,61 +332,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-/* Called during load, to initialize PTP-related stuff */
-static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
-{
- struct qede_ptp *ptp;
- int rc;
-
- ptp = edev->ptp;
- if (!ptp)
- return;
-
- spin_lock_init(&ptp->lock);
-
- /* Configure PTP in HW */
- rc = ptp->ops->enable(edev->cdev);
- if (rc) {
- DP_ERR(edev, "Stopping PTP initialization\n");
- return;
- }
-
- /* Init work queue for Tx timestamping */
- INIT_WORK(&ptp->work, qede_ptp_task);
-
- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- qede_ptp_init_cc(edev);
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
-}
-
-void qede_ptp_start(struct qede_dev *edev, bool init_tc)
-{
- qede_ptp_init(edev, init_tc);
- qede_ptp_cfg_filters(edev);
-}
-
-void qede_ptp_remove(struct qede_dev *edev)
-{
- struct qede_ptp *ptp;
-
- ptp = edev->ptp;
- if (ptp && ptp->clock) {
- ptp_clock_unregister(ptp->clock);
- ptp->clock = NULL;
- }
-
- kfree(ptp);
- edev->ptp = NULL;
-}
-
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
@@ -417,8 +370,7 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
return 0;
}
-/* Called during unload, to stop PTP-related stuff */
-void qede_ptp_stop(struct qede_dev *edev)
+void qede_ptp_disable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
@@ -426,6 +378,11 @@ void qede_ptp_stop(struct qede_dev *edev)
if (!ptp)
return;
+ if (ptp->clock) {
+ ptp_clock_unregister(ptp->clock);
+ ptp->clock = NULL;
+ }
+
/* Cancel PTP work queue. Should be done after the Tx queues are
* drained to prevent additional scheduling.
*/
@@ -439,11 +396,54 @@ void qede_ptp_stop(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
ptp->ops->disable(edev->cdev);
spin_unlock_bh(&ptp->lock);
+
+ kfree(ptp);
+ edev->ptp = NULL;
+}
+
+static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+{
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EINVAL;
+
+ spin_lock_init(&ptp->lock);
+
+ /* Configure PTP in HW */
+ rc = ptp->ops->enable(edev->cdev);
+ if (rc) {
+ DP_INFO(edev, "PTP HW enable failed\n");
+ return rc;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&ptp->work, qede_ptp_task);
+
+ /* Init cyclecounter and timecounter. This is done only in the first
+ * load. If done in every load, PTP application will fail when doing
+ * unload / load (e.g. MTU change) while it is running.
+ */
+ if (init_tc) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+
+ timecounter_init(&ptp->tc, &ptp->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+
+ return rc;
}
-int qede_ptp_register_phc(struct qede_dev *edev)
+int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
struct qede_ptp *ptp;
+ int rc;
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
@@ -454,14 +454,19 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->edev = edev;
ptp->ops = edev->ops->ptp;
if (!ptp->ops) {
- kfree(ptp);
- edev->ptp = NULL;
- DP_ERR(edev, "PTP clock registeration failed\n");
- return -EIO;
+ DP_INFO(edev, "PTP enable failed\n");
+ rc = -EIO;
+ goto err1;
}
edev->ptp = ptp;
+ rc = qede_ptp_init(edev, init_tc);
+ if (rc)
+ goto err1;
+
+ qede_ptp_cfg_filters(edev);
+
/* Fill the ptp_clock_info struct and register PTP clock */
ptp->clock_info.owner = THIS_MODULE;
snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
@@ -478,13 +483,21 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
- ptp->clock = NULL;
- kfree(ptp);
- edev->ptp = NULL;
+ rc = -EINVAL;
DP_ERR(edev, "PTP clock registeration failed\n");
+ goto err2;
}
return 0;
+
+err2:
+ qede_ptp_disable(edev);
+ ptp->clock = NULL;
+err1:
+ kfree(ptp);
+ edev->ptp = NULL;
+
+ return rc;
}
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
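The open-coded cyclecounter setup that replaced qede_ptp_init_cc() uses mult = 1 and shift = 0 because the qed PHC already counts in nanoseconds, so the timecounter's cycles-to-nanoseconds conversion must be the identity. The conversion it parameterizes, as a runnable model:

    /* ns = (cycles * mult) >> shift; mult = 1, shift = 0 is the identity. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cc_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
    {
        return (cycles * mult) >> shift;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)cc_to_ns(123456789ULL, 1, 0));
        return 0;
    }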
@@ -495,6 +508,9 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (!ptp)
return;
+ if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
+ return;
+
if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
DP_NOTICE(edev,
"Tx timestamping was not enabled, this packet will not be timestamped\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index f328f9bba53a..691a14c4b2c5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -40,10 +40,8 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
-void qede_ptp_start(struct qede_dev *edev, bool init_tc);
-void qede_ptp_stop(struct qede_dev *edev);
-void qede_ptp_remove(struct qede_dev *edev);
-int qede_ptp_register_phc(struct qede_dev *edev);
+void qede_ptp_disable(struct qede_dev *edev);
+int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index d7107055ec60..2f656f395f39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -128,6 +128,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
return 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
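pci_find_ext_capability() returns 0 when the capability is absent, so the old code would read config space at 0 + PCI_SRIOV_VF_OFFSET, i.e. an unrelated register, on functions without SR-IOV. The guard in isolation:

    int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);

    if (!pos)
        return 0;   /* no SR-IOV extended capability on this function */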
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index e9e647072596..1188d420fe53 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4686,7 +4686,8 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
+ qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ ndev->name);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
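This is a format-string hardening fix: alloc_ordered_workqueue() forwards its first argument to a printf-style formatter, so passing ndev->name directly lets a '%' in an interface name (renames are user-controlled) be parsed as a conversion specifier. The safe idiom:

    qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
                                              ndev->name);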
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
index f62c215be779..7116be485e61 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -26,6 +26,7 @@
/* SGMII digital lane registers */
#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C
+#define EMAC_SGMII_LN_DRVR_CTRL1 0x0010
#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018
#define EMAC_SGMII_LN_TX_MARGINING 0x001C
#define EMAC_SGMII_LN_TX_PRE 0x0020
@@ -48,6 +49,7 @@
#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC
#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8
#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8
+#define EMAC_SGMII_LN_RX_RESECODE_OFFSET 0x02CC
/* SGMII digital lane register values */
#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
@@ -73,6 +75,8 @@
#define CML_GEAR_MODE(x) (((x) & 7) << 3)
#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+#define RESCODE_OFFSET(x) ((x) & 0x1f)
+
#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
#define MIXER_DATARATE_MODE(x) ((x) & 3)
@@ -159,6 +163,8 @@ static const struct emac_reg_write sgmii_laned[] = {
{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)},
+ {EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)},
{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 040b28977ee7..18c184ee1f3c 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -13,6 +13,7 @@
/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
*/
+#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/acpi.h>
#include <linux/of_device.h>
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 672f6b696069..72233ab9474b 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1406,27 +1406,29 @@ static int cp_get_sset_count (struct net_device *dev, int sset)
}
}
-static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cp_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_gset(&cp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
-static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cp_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_sset(&cp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
@@ -1578,8 +1580,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_drvinfo = cp_get_drvinfo,
.get_regs_len = cp_get_regs_len,
.get_sset_count = cp_get_sset_count,
- .get_settings = cp_get_settings,
- .set_settings = cp_set_settings,
.nway_reset = cp_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = cp_get_msglevel,
@@ -1593,6 +1593,8 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_eeprom = cp_get_eeprom,
.set_eeprom = cp_set_eeprom,
.get_ringparam = cp_get_ringparam,
+ .get_link_ksettings = cp_get_link_ksettings,
+ .set_link_ksettings = cp_set_link_ksettings,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
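This and the following realtek, rocker, sgi and silan hunks are one mechanical migration: the legacy get_settings/set_settings ethtool ops built on struct ethtool_cmd, whose u32 bitmaps cap the world at 32 link modes, give way to get_link_ksettings/set_link_ksettings, with scalars in cmd->base and arbitrarily wide bitmaps in cmd->link_modes. For drivers with an MII PHY the body barely changes; a sketch against a hypothetical foo driver:

    static int foo_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
    {
        struct foo_priv *priv = netdev_priv(dev);   /* illustrative driver */
        int rc;

        spin_lock_irq(&priv->lock);
        rc = mii_ethtool_get_link_ksettings(&priv->mii, cmd);
        spin_unlock_irq(&priv->lock);
        return rc;
    }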
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 89631753e799..ca22f2898664 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2384,21 +2384,23 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
-static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8139_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8139_private *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock);
- mii_ethtool_gset(&tp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii, cmd);
spin_unlock_irq(&tp->lock);
return 0;
}
-static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8139_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct rtl8139_private *tp = netdev_priv(dev);
int rc;
spin_lock_irq(&tp->lock);
- rc = mii_ethtool_sset(&tp->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd);
spin_unlock_irq(&tp->lock);
return rc;
}
@@ -2480,8 +2482,6 @@ static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static const struct ethtool_ops rtl8139_ethtool_ops = {
.get_drvinfo = rtl8139_get_drvinfo,
- .get_settings = rtl8139_get_settings,
- .set_settings = rtl8139_set_settings,
.get_regs_len = rtl8139_get_regs_len,
.get_regs = rtl8139_get_regs,
.nway_reset = rtl8139_nway_reset,
@@ -2493,6 +2493,8 @@ static const struct ethtool_ops rtl8139_ethtool_ops = {
.get_strings = rtl8139_get_strings,
.get_sset_count = rtl8139_get_sset_count,
.get_ethtool_stats = rtl8139_get_ethtool_stats,
+ .get_link_ksettings = rtl8139_get_link_ksettings,
+ .set_link_ksettings = rtl8139_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 81f18a833527..0a8f2817ea60 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -817,7 +817,8 @@ struct rtl8169_private {
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*get_link_ksettings)(struct net_device *,
+ struct ethtool_link_ksettings *);
void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct net_device *);
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
@@ -2115,41 +2116,49 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 status;
+ u32 supported, advertising;
- cmd->supported =
+ supported =
SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_FIBRE;
status = RTL_R32(TBICSR);
- cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->autoneg = !!(status & TBINwEnable);
+ advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->base.autoneg = !!(status & TBINwEnable);
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL; /* Always set */
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL; /* Always set */
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- return mii_ethtool_gset(&tp->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&tp->mii, cmd);
}
-static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
int rc;
rtl_lock_work(tp);
- rc = tp->get_settings(dev, cmd);
+ rc = tp->get_link_ksettings(dev, cmd);
rtl_unlock_work(tp);
return rc;
@@ -2356,7 +2365,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
- .get_settings = rtl8169_get_settings,
.set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
@@ -2368,6 +2376,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
+ .get_link_ksettings = rtl8169_get_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -8351,14 +8360,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_settings = rtl8169_gset_tbi;
+ tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
tp->phy_reset_enable = rtl8169_tbi_reset_enable;
tp->phy_reset_pending = rtl8169_tbi_reset_pending;
tp->link_ok = rtl8169_tbi_link_ok;
tp->do_ioctl = rtl_tbi_ioctl;
} else {
tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_settings = rtl8169_gset_xmii;
+ tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
tp->phy_reset_enable = rtl8169_xmii_reset_enable;
tp->phy_reset_pending = rtl8169_xmii_reset_pending;
tp->link_ok = rtl8169_xmii_link_ok;
@@ -8444,9 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
~(RxBOVF | RxFOVF) : ~0;
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = rtl8169_phy_timer;
+ setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 0f63a44a955d..bab13613b138 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -33,6 +33,7 @@
#include <net/rtnetlink.h>
#include <net/netevent.h>
#include <net/arp.h>
+#include <net/fib_rules.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>
@@ -1115,7 +1116,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
const struct rocker_desc_info *desc_info,
void *priv)
{
- struct ethtool_cmd *ecmd = priv;
+ struct ethtool_link_ksettings *ecmd = priv;
const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
@@ -1137,13 +1138,14 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_TP;
- ecmd->phy_address = 0xff;
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
- ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+
+ ecmd->base.phy_address = 0xff;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.speed = speed;
+ ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@@ -1250,7 +1252,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct ethtool_cmd *ecmd = priv;
+ struct ethtool_link_ksettings *ecmd = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1263,13 +1265,13 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
rocker_port->pport))
return -EMSGSIZE;
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
- ethtool_cmd_speed(ecmd)))
+ ecmd->base.speed))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
- ecmd->duplex))
+ ecmd->base.duplex))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
- ecmd->autoneg))
+ ecmd->base.autoneg))
return -EMSGSIZE;
rocker_tlv_nest_end(desc_info, cmd_info);
return 0;
@@ -1347,8 +1349,9 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
return 0;
}
-static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
+static int
+rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_link_ksettings *ecmd)
{
return rocker_cmd_exec(rocker_port, false,
rocker_cmd_get_port_settings_prep, NULL,
@@ -1373,12 +1376,17 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
rocker_cmd_get_port_settings_mode_proc, p_mode);
}
-static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
+static int
+rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+ const struct ethtool_link_ksettings *ecmd)
{
+ struct ethtool_link_ksettings copy_ecmd;
+
+ memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));
+
return rocker_cmd_exec(rocker_port, false,
rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL);
+ &copy_ecmd, NULL, NULL);
}
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
@@ -2168,7 +2176,10 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
struct rocker_fib_event_work {
struct work_struct work;
- struct fib_entry_notifier_info fen_info;
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct fib_rule_notifier_info fr_info;
+ };
struct rocker *rocker;
unsigned long event;
};
@@ -2178,6 +2189,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
struct rocker_fib_event_work *fib_work =
container_of(work, struct rocker_fib_event_work, work);
struct rocker *rocker = fib_work->rocker;
+ struct fib_rule *rule;
int err;
/* Protect internal structures from changes */
@@ -2195,7 +2207,10 @@ static void rocker_router_fib_event_work(struct work_struct *work)
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- rocker_world_fib4_abort(rocker);
+ rule = fib_work->fr_info.rule;
+ if (!fib4_rule_default(rule))
+ rocker_world_fib4_abort(rocker);
+ fib_rule_put(rule);
break;
}
rtnl_unlock();
@@ -2226,6 +2241,11 @@ static int rocker_router_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+ fib_rule_get(fib_work->fr_info.rule);
+ break;
}
queue_work(rocker->rocker_owq, &fib_work->work);
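The notifier runs in atomic context, so rocker copies the fib_rule_notifier_info and pins the rule with fib_rule_get() before queueing; the worker inspects it under rtnl and drops the reference. That inspection is what allows skipping the expensive fib4 abort for default rules, which every system installs and which do not invalidate offloaded routes. The lifetime pattern, reduced:

    /* notifier (atomic): keep the rule alive until the worker has seen it */
    memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
    fib_rule_get(fib_work->fr_info.rule);

    /* worker (sleepable, under rtnl): */
    if (!fib4_rule_default(rule))
        rocker_world_fib4_abort(rocker);    /* custom rules: stop offloading */
    fib_rule_put(rule);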
@@ -2237,16 +2257,18 @@ static int rocker_router_fib_event(struct notifier_block *nb,
* ethtool interface
********************/
-static int rocker_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int
+rocker_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct rocker_port *rocker_port = netdev_priv(dev);
return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}
-static int rocker_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int
+rocker_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct rocker_port *rocker_port = netdev_priv(dev);
@@ -2388,13 +2410,13 @@ static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
- .get_settings = rocker_port_get_settings,
- .set_settings = rocker_port_set_settings,
.get_drvinfo = rocker_port_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = rocker_port_get_strings,
.get_ethtool_stats = rocker_port_get_stats,
.get_sset_count = rocker_port_get_sset_count,
+ .get_link_ksettings = rocker_port_get_link_ksettings,
+ .set_link_ksettings = rocker_port_set_link_ksettings,
};
/*****************
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c60c2d4c646a..78efb2822b86 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -119,6 +119,7 @@ struct efx_ef10_filter_table {
bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
bool mc_promisc_last;
+ bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
bool vlan_filter;
struct list_head vlan_list;
};
@@ -5058,6 +5059,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
struct netdev_hw_addr *mc;
unsigned int i, addr_count;
+ table->mc_overflow = false;
table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
@@ -5065,6 +5067,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
table->mc_promisc = true;
+ table->mc_overflow = true;
break;
}
ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
@@ -5469,12 +5472,15 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
}
} else {
/* If we failed to insert promiscuous filters, don't
- * rollback. Regardless, also insert the mc_list
+ * rollback. Regardless, also insert the mc_list,
+ * unless it's incomplete due to overflow
*/
efx_ef10_filter_insert_def(efx, vlan,
EFX_ENCAP_TYPE_NONE,
true, false);
- efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
+ if (!table->mc_overflow)
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
} else {
/* If any filters failed to insert, rollback and fall back to
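
Taken together, the ef10 hunks keep a truncated multicast list from ever being programmed: mc_overflow is set exactly when the copy into dev_mc_list stops early, and the VLAN sync path then relies on the promiscuous filter alone. A minimal sketch of the flag's life cycle, using the fields added above:

	/* scan: remember that the copied list is partial */
	table->mc_overflow = false;
	netdev_for_each_mc_addr(mc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
			table->mc_promisc = true;	/* fall back to promisc */
			table->mc_overflow = true;	/* dev_mc_list is partial */
			break;
		}
		ether_addr_copy(table->dev_mc_list[i++].addr, mc->addr);
	}

	/* sync: never insert a partial list alongside the promisc filter */
	if (!table->mc_overflow)
		efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
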
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 104fb15a73f2..f6daf09b8627 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -437,11 +437,13 @@ int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- num_tc = ntc->tc;
+ num_tc = ntc->mqprio->num_tc;
if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
return -EINVAL;
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
if (num_tc == net_dev->num_tc)
return 0;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ff88d60aa6d5..3bdf87f31087 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -665,11 +665,13 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- num_tc = ntc->tc;
+ num_tc = ntc->mqprio->num_tc;
if (num_tc > EFX_MAX_TX_TC)
return -EINVAL;
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
if (num_tc == net_dev->num_tc)
return 0;
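
Both sfc trees make the same API move: the mqprio parameters now arrive via ntc->mqprio instead of a bare ntc->tc count, and a driver that accepts the mapping signals hardware offload through ntc->mqprio->hw. A minimal sketch of the updated contract (FOO_* names are placeholders, and the queue setup that follows in the real drivers is elided):

	static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
				struct tc_to_netdev *ntc)
	{
		unsigned int num_tc;

		if (ntc->type != TC_SETUP_MQPRIO)
			return -EINVAL;

		num_tc = ntc->mqprio->num_tc;		/* was: ntc->tc */
		if (num_tc > FOO_MAX_TX_TC)		/* placeholder limit */
			return -EINVAL;

		ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;	/* report offload */
		return netdev_set_num_tc(dev, num_tc);
	}
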
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 57e6cef81ebe..52ead5524de7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1558,25 +1558,27 @@ static void ioc3_get_drvinfo (struct net_device *dev,
strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}
-static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ioc3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_gset(&ip->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
-static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ioc3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_sset(&ip->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
@@ -1608,10 +1610,10 @@ static u32 ioc3_get_link(struct net_device *dev)
static const struct ethtool_ops ioc3_ethtool_ops = {
.get_drvinfo = ioc3_get_drvinfo,
- .get_settings = ioc3_get_settings,
- .set_settings = ioc3_set_settings,
.nway_reset = ioc3_nway_reset,
.get_link = ioc3_get_link,
+ .get_link_ksettings = ioc3_get_link_ksettings,
+ .set_link_ksettings = ioc3_set_link_ksettings,
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
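
The ioc3 conversion is the template repeated by most of the MII-library drivers below (sis190, sis900, epic100, smc91c92): swap mii_ethtool_gset()/mii_ethtool_sset() for the ksettings helpers and register .get_link_ksettings/.set_link_ksettings in place of the removed .get_settings/.set_settings. A minimal sketch with placeholder names, keeping the same locking shape:

	static int foo_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
	{
		struct foo_private *fp = netdev_priv(dev);
		int rc;

		spin_lock_irq(&fp->lock);
		rc = mii_ethtool_get_link_ksettings(&fp->mii, cmd);
		spin_unlock_irq(&fp->lock);

		return rc;
	}

	/* the set path mirrors this with a const cmd and
	 * mii_ethtool_set_link_ksettings()
	 */
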
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 6c2e2b311c16..751c81848f35 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1122,14 +1122,16 @@ static void sc92031_poll_controller(struct net_device *dev)
}
#endif
-static int sc92031_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+sc92031_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
u8 phy_address;
u32 phy_ctrl;
u16 output_status;
+ u32 supported, advertising;
spin_lock_bh(&priv->lock);
@@ -1142,68 +1144,77 @@ static int sc92031_ethtool_get_settings(struct net_device *dev,
spin_unlock_bh(&priv->lock);
- cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+ supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advertising = ADVERTISED_TP | ADVERTISED_MII;
if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
== (PhyCtrlSpd10 | PhyCtrlDux))
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
== (PhyCtrlSpd100 | PhyCtrlDux))
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (phy_ctrl & PhyCtrlAne)
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
- ethtool_cmd_speed_set(cmd,
- (output_status & 0x2) ? SPEED_100 : SPEED_10);
- cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = PORT_MII;
- cmd->phy_address = phy_address;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
+ cmd->base.duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = phy_address;
+ cmd->base.autoneg = (phy_ctrl & PhyCtrlAne) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int sc92031_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+sc92031_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
u32 phy_ctrl;
u32 old_phy_ctrl;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(speed == SPEED_10 || speed == SPEED_100))
return -EINVAL;
- if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
- return -EINVAL;
- if (!(cmd->port == PORT_MII))
+ if (!(cmd->base.duplex == DUPLEX_HALF ||
+ cmd->base.duplex == DUPLEX_FULL))
return -EINVAL;
- if (!(cmd->phy_address == 0x1f))
+ if (!(cmd->base.port == PORT_MII))
return -EINVAL;
- if (!(cmd->transceiver == XCVR_INTERNAL))
+ if (!(cmd->base.phy_address == 0x1f))
return -EINVAL;
- if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
+ if (!(cmd->base.autoneg == AUTONEG_DISABLE ||
+ cmd->base.autoneg == AUTONEG_ENABLE))
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (!(cmd->advertising & (ADVERTISED_Autoneg
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(advertising & (ADVERTISED_Autoneg
| ADVERTISED_100baseT_Full
| ADVERTISED_100baseT_Half
| ADVERTISED_10baseT_Full
@@ -1213,15 +1224,15 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
phy_ctrl = PhyCtrlAne;
// FIXME: I'm not sure what the original code was trying to do
- if (cmd->advertising & ADVERTISED_Autoneg)
+ if (advertising & ADVERTISED_Autoneg)
phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
phy_ctrl |= PhyCtrlSpd100;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
phy_ctrl |= PhyCtrlSpd10;
} else {
// FIXME: Whole branch guessed
@@ -1232,7 +1243,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
else /* cmd->speed == SPEED_100 */
phy_ctrl |= PhyCtrlSpd100;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
phy_ctrl |= PhyCtrlDux;
}
@@ -1368,8 +1379,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops sc92031_ethtool_ops = {
- .get_settings = sc92031_ethtool_get_settings,
- .set_settings = sc92031_ethtool_set_settings,
.get_wol = sc92031_ethtool_get_wol,
.set_wol = sc92031_ethtool_set_wol,
.nway_reset = sc92031_ethtool_nway_reset,
@@ -1377,6 +1386,8 @@ static const struct ethtool_ops sc92031_ethtool_ops = {
.get_strings = sc92031_ethtool_get_strings,
.get_sset_count = sc92031_ethtool_get_sset_count,
.get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
+ .get_link_ksettings = sc92031_ethtool_get_link_ksettings,
+ .set_link_ksettings = sc92031_ethtool_set_link_ksettings,
};
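
sc92031 shows the other half of the ksettings migration: drivers that compute SUPPORTED_*/ADVERTISED_* masks by hand keep doing so in a local u32 and translate at the boundary, since ethtool_link_ksettings stores link modes as bitmaps rather than u32 fields. A sketch of the idiom, not new driver code:

	u32 supported = SUPPORTED_TP | SUPPORTED_MII;	/* built up as before */
	u32 advertising;

	/* get path: legacy mask -> link-mode bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	/* set path: bitmap -> legacy mask, so the existing bit tests work */
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

Note also that the transceiver checks and assignments are dropped outright in these conversions rather than translated.
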
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 210e35d079dd..02da106c6e04 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1734,18 +1734,20 @@ static void sis190_set_speed_auto(struct net_device *dev)
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
-static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int sis190_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_gset(&tp->mii_if, cmd);
+ return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
}
-static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int sis190_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_sset(&tp->mii_if, cmd);
+ return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
}
static void sis190_get_drvinfo(struct net_device *dev,
@@ -1797,8 +1799,6 @@ static void sis190_set_msglevel(struct net_device *dev, u32 value)
}
static const struct ethtool_ops sis190_ethtool_ops = {
- .get_settings = sis190_get_settings,
- .set_settings = sis190_set_settings,
.get_drvinfo = sis190_get_drvinfo,
.get_regs_len = sis190_get_regs_len,
.get_regs = sis190_get_regs,
@@ -1806,6 +1806,8 @@ static const struct ethtool_ops sis190_ethtool_ops = {
.get_msglevel = sis190_get_msglevel,
.set_msglevel = sis190_set_msglevel,
.nway_reset = sis190_nway_reset,
+ .get_link_ksettings = sis190_get_link_ksettings,
+ .set_link_ksettings = sis190_set_link_ksettings,
};
static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 1b6f6171d078..40bd88362e3d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2035,23 +2035,23 @@ static u32 sis900_get_link(struct net_device *net_dev)
return mii_link_ok(&sis_priv->mii_info);
}
-static int sis900_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int sis900_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
spin_lock_irq(&sis_priv->lock);
- mii_ethtool_gset(&sis_priv->mii_info, cmd);
+ mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return 0;
}
-static int sis900_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int sis900_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
int rt;
spin_lock_irq(&sis_priv->lock);
- rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
+ rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return rt;
}
@@ -2129,11 +2129,11 @@ static const struct ethtool_ops sis900_ethtool_ops = {
.get_msglevel = sis900_get_msglevel,
.set_msglevel = sis900_set_msglevel,
.get_link = sis900_get_link,
- .get_settings = sis900_get_settings,
- .set_settings = sis900_set_settings,
.nway_reset = sis900_nway_reset,
.get_wol = sis900_get_wol,
- .set_wol = sis900_set_wol
+ .set_wol = sis900_set_wol,
+ .get_link_ksettings = sis900_get_link_ksettings,
+ .set_link_ksettings = sis900_set_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 5f2737189c72..db6dcb06193d 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1387,25 +1387,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_gset(&np->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_sset(&np->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
@@ -1460,14 +1462,14 @@ static void ethtool_complete(struct net_device *dev)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
.begin = ethtool_begin,
- .complete = ethtool_complete
+ .complete = ethtool_complete,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 4f19c6166182..36307d34f641 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1446,40 +1446,40 @@ static int smc911x_close(struct net_device *dev)
* Ethtool support
*/
static int
-smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret, status;
unsigned long flags;
+ u32 supported;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_gset(&lp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
- cmd->supported = SUPPORTED_10baseT_Half |
+ supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
-
- cmd->autoneg = AUTONEG_DISABLE;
- if (lp->mii.phy_id==1)
- cmd->transceiver = XCVR_INTERNAL;
- else
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->port = 0;
+ cmd->base.speed = SPEED_100;
+
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = 0;
SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
- cmd->duplex =
+ cmd->base.duplex =
(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
DUPLEX_FULL : DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+
ret = 0;
}
@@ -1487,7 +1487,8 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}
static int
-smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1495,16 +1496,18 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_sset(&lp->mii, cmd);
+ ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
- if (cmd->autoneg != AUTONEG_DISABLE ||
- cmd->speed != SPEED_10 ||
- (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
- (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ if (cmd->base.autoneg != AUTONEG_DISABLE ||
+ cmd->base.speed != SPEED_10 ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL) ||
+ (cmd->base.port != PORT_TP &&
+ cmd->base.port != PORT_AUI))
return -EINVAL;
- lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+ lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
ret = 0;
}
@@ -1686,8 +1689,6 @@ static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
}
static const struct ethtool_ops smc911x_ethtool_ops = {
- .get_settings = smc911x_ethtool_getsettings,
- .set_settings = smc911x_ethtool_setsettings,
.get_drvinfo = smc911x_ethtool_getdrvinfo,
.get_msglevel = smc911x_ethtool_getmsglevel,
.set_msglevel = smc911x_ethtool_setmsglevel,
@@ -1698,6 +1699,8 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
.get_eeprom_len = smc911x_ethtool_geteeprom_len,
.get_eeprom = smc911x_ethtool_geteeprom,
.set_eeprom = smc911x_ethtool_seteeprom,
+ .get_link_ksettings = smc911x_ethtool_get_link_ksettings,
+ .set_link_ksettings = smc911x_ethtool_set_link_ksettings,
};
/*
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 97280daba27f..976aa876789a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1843,56 +1843,60 @@ static int smc_link_ok(struct net_device *dev)
}
}
-static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
- u16 tmp;
- unsigned int ioaddr = dev->base_addr;
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
+ u32 supported;
- ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
-
- SMC_SELECT_BANK(1);
- tmp = inw(ioaddr + CONFIG);
- ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->phy_address = ioaddr + MGMT;
+ supported = (SUPPORTED_TP | SUPPORTED_AUI |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
- SMC_SELECT_BANK(0);
- tmp = inw(ioaddr + TCR);
- ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ SMC_SELECT_BANK(1);
+ tmp = inw(ioaddr + CONFIG);
+ ecmd->base.port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
+ ecmd->base.speed = SPEED_10;
+ ecmd->base.phy_address = ioaddr + MGMT;
- return 0;
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ ecmd->base.duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+
+ return 0;
}
-static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_netdev_set_ecmd(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
- u16 tmp;
- unsigned int ioaddr = dev->base_addr;
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
- if (ethtool_cmd_speed(ecmd) != SPEED_10)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
- return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
+ if (ecmd->base.speed != SPEED_10)
+ return -EINVAL;
+ if (ecmd->base.duplex != DUPLEX_HALF &&
+ ecmd->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->base.port != PORT_TP && ecmd->base.port != PORT_AUI)
+ return -EINVAL;
- if (ecmd->port == PORT_AUI)
- smc_set_xcvr(dev, 1);
- else
- smc_set_xcvr(dev, 0);
+ if (ecmd->base.port == PORT_AUI)
+ smc_set_xcvr(dev, 1);
+ else
+ smc_set_xcvr(dev, 0);
- SMC_SELECT_BANK(0);
- tmp = inw(ioaddr + TCR);
- if (ecmd->duplex == DUPLEX_FULL)
- tmp |= TCR_FDUPLX;
- else
- tmp &= ~TCR_FDUPLX;
- outw(tmp, ioaddr + TCR);
-
- return 0;
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ if (ecmd->base.duplex == DUPLEX_FULL)
+ tmp |= TCR_FDUPLX;
+ else
+ tmp &= ~TCR_FDUPLX;
+ outw(tmp, ioaddr + TCR);
+
+ return 0;
}
static int check_if_running(struct net_device *dev)
@@ -1908,7 +1912,8 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
@@ -1919,7 +1924,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_gset(&smc->mii_if, ecmd);
+ ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
@@ -1927,7 +1932,8 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return ret;
}
-static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
@@ -1938,7 +1944,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_sset(&smc->mii_if, ecmd);
+ ret = mii_ethtool_set_link_ksettings(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
@@ -1982,10 +1988,10 @@ static int smc_nway_reset(struct net_device *dev)
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = smc_get_drvinfo,
- .get_settings = smc_get_settings,
- .set_settings = smc_set_settings,
.get_link = smc_get_link,
.nway_reset = smc_nway_reset,
+ .get_link_ksettings = smc_get_link_ksettings,
+ .set_link_ksettings = smc_set_link_ksettings,
};
static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 01a8c020d6db..e93c40b4631e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -26,12 +26,15 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int entry = priv->cur_tx;
- struct dma_desc *desc = priv->dma_tx + entry;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->cur_tx;
unsigned int bmax, des2;
unsigned int i = 1, len;
+ struct dma_desc *desc;
+
+ desc = tx_q->dma_tx + entry;
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
/* do not close the descriptor and do not set own bit */
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
- 0, false);
+ 0, false, skb->len);
while (len != 0) {
- priv->tx_skbuff[entry] = NULL;
+ tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
if (len > bmax) {
des2 = dma_map_single(priv->device,
@@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE, 1,
- false);
+ false, skb->len);
len -= bmax;
i++;
} else {
@@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = len;
/* last descriptor can be set now */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE, 1,
- true);
+ true, skb->len);
len = 0;
}
}
- priv->cur_tx = entry;
+ tx_q->cur_tx = entry;
return entry;
}
@@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
+ struct stmmac_priv *priv = rx_q->priv_data;
if (priv->hwts_rx_en && !priv->extend_desc)
/* NOTE: Device will overwrite des3 with timestamp value if
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
- (((priv->dirty_rx) + 1) %
+ p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
+ (((rx_q->dirty_rx) + 1) %
DMA_RX_SIZE) *
sizeof(struct dma_desc)));
}
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- unsigned int entry = priv->dirty_tx;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->dirty_tx;
- if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+ if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
priv->hwts_tx_en)
/* NOTE: Device will overwrite des3 with timestamp value if
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
- ((priv->dirty_tx + 1) % DMA_TX_SIZE))
+ p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
+ ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
* sizeof(struct dma_desc)));
}
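
In the stmmac chain-mode hunks, every callback that used to cast its opaque pointer to struct stmmac_priv now receives a per-queue object carrying its own ring state plus a back-pointer to the priv; prepare_tx_desc() also gains the total packet length as a final argument. A condensed sketch of the new shape (not the full jumbo-frame logic):

	static int foo_jumbo_frm(void *p, struct sk_buff *skb, int csum)
	{
		struct stmmac_tx_queue *tx_q = p;		/* was: stmmac_priv */
		struct stmmac_priv *priv = tx_q->priv_data;	/* back-pointer */
		unsigned int entry = tx_q->cur_tx;		/* per-queue index */
		struct dma_desc *desc = tx_q->dma_tx + entry;	/* per-queue ring */

		priv->hw->desc->prepare_tx_desc(desc, 1, skb_headlen(skb), csum,
						STMMAC_CHAIN_MODE, 0, true,
						skb->len);	/* new tot_pkt_len */

		tx_q->cur_tx = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
		return tx_q->cur_tx;
	}
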
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 04d9245b7149..b7ce3fbb5375 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -246,6 +246,15 @@ struct stmmac_extra_stats {
#define STMMAC_TX_MAX_FRAMES 256
#define STMMAC_TX_FRAMES 64
+/* Packet types */
+enum packets_types {
+ PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
+ PACKET_PTPQ = 0x2, /* PTP Packets */
+ PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
+ PACKET_UPQ = 0x4, /* Untagged Packets */
+ PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
+};
+
/* Rx IPC status */
enum rx_frame_status {
good_frame = 0x0,
@@ -324,6 +333,9 @@ struct dma_features {
unsigned int number_tx_queues;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
+ /* TX and RX FIFO sizes */
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -361,7 +373,7 @@ struct stmmac_desc_ops {
/* Invoked by the xmit function to prepare the tx descriptor */
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls);
+ bool ls, unsigned int tot_pkt_len);
void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
int len2, bool tx_own, bool ls,
unsigned int tcphdrlen,
@@ -413,6 +425,14 @@ struct stmmac_dma_ops {
int (*reset)(void __iomem *ioaddr);
void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
u32 dma_tx, u32 dma_rx, int atds);
+ void (*init_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan);
+ void (*init_rx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan);
+ void (*init_tx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan);
/* Configure the AXI Bus Mode Register */
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
@@ -421,25 +441,28 @@ struct stmmac_dma_ops {
* An invalid value enables the store-and-forward mode */
void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
int rxfifosz);
+ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz);
+ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq) (void __iomem *ioaddr);
- void (*disable_dma_irq) (void __iomem *ioaddr);
- void (*start_tx) (void __iomem *ioaddr);
- void (*stop_tx) (void __iomem *ioaddr);
- void (*start_rx) (void __iomem *ioaddr);
- void (*stop_rx) (void __iomem *ioaddr);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*start_tx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_tx)(void __iomem *ioaddr, u32 chan);
+ void (*start_rx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_rx)(void __iomem *ioaddr, u32 chan);
int (*dma_interrupt) (void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
+ struct stmmac_extra_stats *x, u32 chan);
/* If supported then get the optional core features */
void (*get_hw_feature)(void __iomem *ioaddr,
struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
- void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
- void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
- void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
@@ -451,20 +474,44 @@ struct mac_device_info;
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, int mtu);
+ /* Enable the MAC RX/TX */
+ void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc)(struct mac_device_info *hw);
/* Enable RX Queues */
- void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
+ void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
+ /* RX Queues Priority */
+ void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* TX Queues Priority */
+ void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* RX Queues Routing */
+ void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
+ u32 queue);
+ /* Program RX Algorithms */
+ void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
+ /* Program TX Algorithms */
+ void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
+ /* Set MTL TX queues weight */
+ void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+ u32 weight, u32 queue);
+ /* RX MTL queue to RX dma mapping */
+ void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
+ /* Configure AV Algorithm */
+ void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
+ u32 idle_slope, u32 high_credit, u32 low_credit,
+ u32 queue);
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
/* Handle extra events on specific interrupts hw dependent */
int (*host_irq_status)(struct mac_device_info *hw,
struct stmmac_extra_stats *x);
+ /* Handle MTL interrupts */
+ int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
/* Multicast filter setting */
void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
/* Flow control setting */
void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time);
+ unsigned int fc, unsigned int pause_time, u32 tx_cnt);
/* Set power management mode (e.g. magic frame) */
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
/* Set/Get Unicast MAC addresses */
@@ -477,7 +524,8 @@ struct stmmac_ops {
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+ void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues);
/* PCS calls */
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
bool loopback);
@@ -547,6 +595,11 @@ struct mac_device_info {
unsigned int ps;
};
+struct stmmac_rx_routing {
+ u32 reg_mask;
+ u32 reg_shift;
+};
+
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
int perfect_uc_entries,
int *synopsys_id);
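
The common.h changes widen the DMA and MAC operation tables for multiqueue: nearly every channel-level op gains a u32 chan (or queue) argument, and new MTL hooks cover queue priorities, packet routing, scheduling algorithms, and CBS. Callers are then expected to iterate over the configured queues; a hypothetical helper mirroring how the core drives the per-channel ops:

	static void foo_start_all_tx(struct stmmac_priv *priv, u32 tx_queues)
	{
		u32 chan;

		for (chan = 0; chan < tx_queues; chan++)
			priv->hw->dma->start_tx(priv->ioaddr, chan);
	}
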
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 1a3fa3d9f855..dd6a2f9791cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -14,16 +14,34 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/stmmac.h>
#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+struct tegra_eqos {
+ struct device *dev;
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_master;
+ struct clk *clk_slave;
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+
+ struct gpio_desc *reset;
+};
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return 0;
}
+static void *dwc_qos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ int err;
+
+ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(plat_dat->stmmac_clk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ return ERR_CAST(plat_dat->stmmac_clk);
+ }
+
+ err = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(plat_dat->pclk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ err = PTR_ERR(plat_dat->pclk);
+ goto disable;
+ }
+
+ err = clk_prepare_enable(plat_dat->pclk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
+ err);
+ goto disable;
+ }
+
+ return NULL;
+
+disable:
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ERR_PTR(err);
+}
+
+static int dwc_qos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+#define SDMEMCOMPPADCTRL 0x8800
+#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
+
+#define AUTO_CAL_CONFIG 0x8804
+#define AUTO_CAL_CONFIG_START BIT(31)
+#define AUTO_CAL_CONFIG_ENABLE BIT(29)
+
+#define AUTO_CAL_STATUS 0x880c
+#define AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate = 125000000;
+ bool needs_calibration = false;
+ u32 value;
+ int err;
+
+ switch (speed) {
+ case SPEED_1000:
+ needs_calibration = true;
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ needs_calibration = true;
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(eqos->dev, "invalid speed %u\n", speed);
+ break;
+ }
+
+ if (needs_calibration) {
+ /* calibrate */
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ udelay(1);
+
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ value & AUTO_CAL_STATUS_ACTIVE,
+ 1, 10);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration did not start\n");
+ goto failed;
+ }
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ (value & AUTO_CAL_STATUS_ACTIVE) == 0,
+ 20, 200);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration didn't finish\n");
+ goto failed;
+ }
+
+ failed:
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+ } else {
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value &= ~AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+ }
+
+ err = clk_set_rate(eqos->clk_tx, rate);
+ if (err < 0)
+ dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
+}
+
+static int tegra_eqos_init(struct platform_device *pdev, void *priv)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate;
+ u32 value;
+
+ rate = clk_get_rate(eqos->clk_slave);
+
+ value = (rate / 1000000) - 1;
+ writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
+
+ return 0;
+}
+
+static void *tegra_eqos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res)
+{
+ struct tegra_eqos *eqos;
+ int err;
+
+ eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
+ if (!eqos) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ eqos->dev = &pdev->dev;
+ eqos->regs = res->addr;
+
+ eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
+ if (IS_ERR(eqos->clk_master)) {
+ err = PTR_ERR(eqos->clk_master);
+ goto error;
+ }
+
+ err = clk_prepare_enable(eqos->clk_master);
+ if (err < 0)
+ goto error;
+
+ eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
+ if (IS_ERR(eqos->clk_slave)) {
+ err = PTR_ERR(eqos->clk_slave);
+ goto disable_master;
+ }
+
+ data->stmmac_clk = eqos->clk_slave;
+
+ err = clk_prepare_enable(eqos->clk_slave);
+ if (err < 0)
+ goto disable_master;
+
+ eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(eqos->clk_rx)) {
+ err = PTR_ERR(eqos->clk_rx);
+ goto disable_slave;
+ }
+
+ err = clk_prepare_enable(eqos->clk_rx);
+ if (err < 0)
+ goto disable_slave;
+
+ eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(eqos->clk_tx)) {
+ err = PTR_ERR(eqos->clk_tx);
+ goto disable_rx;
+ }
+
+ err = clk_prepare_enable(eqos->clk_tx);
+ if (err < 0)
+ goto disable_rx;
+
+ eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(eqos->reset)) {
+ err = PTR_ERR(eqos->reset);
+ goto disable_tx;
+ }
+
+ usleep_range(2000, 4000);
+ gpiod_set_value(eqos->reset, 0);
+
+ eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
+ if (IS_ERR(eqos->rst)) {
+ err = PTR_ERR(eqos->rst);
+ goto reset_phy;
+ }
+
+ err = reset_control_assert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_deassert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ data->fix_mac_speed = tegra_eqos_fix_speed;
+ data->init = tegra_eqos_init;
+ data->bsp_priv = eqos;
+
+ err = tegra_eqos_init(pdev, eqos);
+ if (err < 0)
+ goto reset;
+
+out:
+ return eqos;
+
+reset:
+ reset_control_assert(eqos->rst);
+reset_phy:
+ gpiod_set_value(eqos->reset, 1);
+disable_tx:
+ clk_disable_unprepare(eqos->clk_tx);
+disable_rx:
+ clk_disable_unprepare(eqos->clk_rx);
+disable_slave:
+ clk_disable_unprepare(eqos->clk_slave);
+disable_master:
+ clk_disable_unprepare(eqos->clk_master);
+error:
+ eqos = ERR_PTR(err);
+ goto out;
+}
+
+static int tegra_eqos_remove(struct platform_device *pdev)
+{
+ struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
+
+ reset_control_assert(eqos->rst);
+ gpiod_set_value(eqos->reset, 1);
+ clk_disable_unprepare(eqos->clk_tx);
+ clk_disable_unprepare(eqos->clk_rx);
+ clk_disable_unprepare(eqos->clk_slave);
+ clk_disable_unprepare(eqos->clk_master);
+
+ return 0;
+}
+
+struct dwc_eth_dwmac_data {
+ void *(*probe)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res);
+ int (*remove)(struct platform_device *pdev);
+};
+
+static const struct dwc_eth_dwmac_data dwc_qos_data = {
+ .probe = dwc_qos_probe,
+ .remove = dwc_qos_remove,
+};
+
+static const struct dwc_eth_dwmac_data tegra_eqos_data = {
+ .probe = tegra_eqos_probe,
+ .remove = tegra_eqos_remove,
+};
+
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
{
+ const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct resource *res;
+ void *priv;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+
memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
/**
@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- ret = PTR_ERR(plat_dat->stmmac_clk);
- plat_dat->stmmac_clk = NULL;
- goto err_remove_config_dt;
- }
- clk_prepare_enable(plat_dat->stmmac_clk);
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- ret = PTR_ERR(plat_dat->pclk);
- plat_dat->pclk = NULL;
- goto err_out_clk_dis_phy;
+ priv = data->probe(pdev, plat_dat, &stmmac_res);
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
+ dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
+ goto remove_config;
}
- clk_prepare_enable(plat_dat->pclk);
ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
if (ret)
- goto err_out_clk_dis_aper;
+ goto remove;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_out_clk_dis_aper;
+ goto remove;
- return 0;
+ return ret;
-err_out_clk_dis_aper:
- clk_disable_unprepare(plat_dat->pclk);
-err_out_clk_dis_phy:
- clk_disable_unprepare(plat_dat->stmmac_clk);
-err_remove_config_dt:
+remove:
+ data->remove(pdev);
+remove_config:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
@@ -178,11 +479,29 @@ err_remove_config_dt:
static int dwc_eth_dwmac_remove(struct platform_device *pdev)
{
- return stmmac_pltfr_remove(pdev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct dwc_eth_dwmac_data *data;
+ int err;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ err = stmmac_dvr_remove(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = data->remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
- { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
+ { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
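
Within the new tegra_eqos_fix_speed() above, the calibration handshake is built on readl_poll_timeout_atomic() from <linux/iopoll.h>, which re-reads a register until a condition holds or a timeout expires, without sleeping. Its argument order is easy to misread, so as a reference (matching the call in the patch):

	/* readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us):
	 * val receives each readl(addr); polling stops once cond is true
	 * or after timeout_us, returning 0 or -ETIMEDOUT respectively.
	 */
	err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, value,
					(value & AUTO_CAL_STATUS_ACTIVE) == 0,
					20, 200);

The probe path, meanwhile, dispatches through of_device_get_match_data(), so each compatible string carries its own probe/remove pair in struct dwc_eth_dwmac_data instead of hard-coding one clock setup for all users.
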
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index e5db6ac36235..f0df5193f047 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -74,6 +74,10 @@ struct rk_priv_data {
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define DELAY_ENABLE(soc, tx, rx) \
+ (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
+ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+
#define RK3228_GRF_MAC_CON0 0x0900
#define RK3228_GRF_MAC_CON1 0x0904
@@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RGMII |
RK3228_GMAC_RMII_MODE_CLR |
- RK3228_GMAC_RXCLK_DLY_ENABLE |
- RK3228_GMAC_TXCLK_DLY_ENABLE);
+ DELAY_ENABLE(RK3228, tx_delay, rx_delay));
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
@@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3288_GMAC_PHY_INTF_SEL_RGMII |
RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
- RK3288_GMAC_RXCLK_DLY_ENABLE |
- RK3288_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3366_GMAC_PHY_INTF_SEL_RGMII |
RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
- RK3366_GMAC_RXCLK_DLY_ENABLE |
- RK3366_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3368_GMAC_PHY_INTF_SEL_RGMII |
RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
- RK3368_GMAC_RXCLK_DLY_ENABLE |
- RK3368_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3399_GMAC_PHY_INTF_SEL_RGMII |
RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
- RK3399_GMAC_RXCLK_DLY_ENABLE |
- RK3399_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
return ret;
/*rmii or rgmii*/
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
dev_info(dev, "init for RGMII\n");
bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
bsp_priv->rx_delay);
- } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ dev_info(dev, "init for RGMII_ID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_info(dev, "init for RGMII_RXID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dev_info(dev, "init for RGMII_TXID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
dev_info(dev, "init for RMII\n");
bsp_priv->ops->set_to_rmii(bsp_priv);
- } else {
+ break;
+ default:
dev_err(dev, "NO interface defined!\n");
}
@@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, unsigned int speed)
struct rk_priv_data *bsp_priv = priv;
struct device *dev = &bsp_priv->pdev->dev;
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
- else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ break;
+ case PHY_INTERFACE_MODE_RMII:
bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
- else
+ break;
+ default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+ }
}
static int rk_gmac_probe(struct platform_device *pdev)
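
The DELAY_ENABLE() macro added at the top of dwmac-rk.c is what makes the new RGMII_ID/RGMII_RXID/RGMII_TXID cases possible: instead of unconditionally enabling both delay lines, each set_to_rgmii() call now enables exactly the MAC-side delays that the PHY is not already inserting. An illustrative expansion (not part of the patch):

	/* DELAY_ENABLE(RK3288, 1, 0) pastes to
	 *	RK3288_GMAC_TXCLK_DLY_ENABLE | RK3288_GMAC_RXCLK_DLY_DISABLE
	 * and since those constants are built with GRF_BIT()/GRF_CLR_BIT(),
	 * the value written also carries the write-enable bits that the
	 * Rockchip GRF expects in the upper 16 bits of the register.
	 */
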
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 19b9b3087099..f3d9305e5f70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
/* Set flow such that DZPQ in Mac Register 6 is 0,
@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
{
u32 value = readl(ioaddr + GMAC_DEBUG);
@@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
static const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
+ .set_mac = stmmac_set_mac,
.rx_ipc = dwmac1000_rx_ipc_enable,
.dump_regs = dwmac1000_dump_regs,
.host_irq_status = dwmac1000_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index d3654a447046..471a9aa6ac94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
+ u32 number_chan)
{
writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index e370ccec6176..1b3609105484 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
}
static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
static const struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
+ .set_mac = stmmac_set_mac,
.rx_ipc = dwmac100_rx_ipc_enable,
.dump_regs = dwmac100_dump_mac_regs,
.host_irq_status = dwmac100_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index db45134fddf0..d74cedf2a397 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -22,9 +22,15 @@
#define GMAC_HASH_TAB_32_63 0x00000014
#define GMAC_RX_FLOW_CTRL 0x00000090
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_TXQ_PRTY_MAP0 0x98
+#define GMAC_TXQ_PRTY_MAP1 0x9C
#define GMAC_RXQ_CTRL0 0x000000a0
+#define GMAC_RXQ_CTRL1 0x000000a4
+#define GMAC_RXQ_CTRL2 0x000000a8
+#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
+#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -38,6 +44,22 @@
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+/* RX Queues Routing */
+#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
+#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
+#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
+#define GMAC_RXQCTRL_PTPQ_SHIFT 4
+#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
+#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
+#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
+#define GMAC_RXQCTRL_UPQ_SHIFT 12
+#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
+#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
+#define GMAC_RXQCTRL_MCBCQEN BIT(20)
+#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
+#define GMAC_RXQCTRL_TACPQE BIT(21)
+#define GMAC_RXQCTRL_TACPQE_SHIFT 21
+
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
#define GMAC_PACKET_FILTER_HMC BIT(2)
@@ -53,6 +75,14 @@
/* MAC Flow Control RX */
#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
+/* RX Queues Priorities */
+#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
+
+/* TX Queues Priorities */
+#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
+
/* MAC Flow Control TX */
#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
@@ -148,6 +178,8 @@ enum power_event {
/* MAC HW features1 bitmap */
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
+#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
/* MAC HW features2 bitmap */
#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
@@ -161,8 +193,25 @@ enum power_event {
#define GMAC_HI_REG_AE BIT(31)
/* MTL registers */
+#define MTL_OPERATION_MODE 0x00000c00
+#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
+#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
+#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
+#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
+#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
+#define MTL_OPERATION_RAA BIT(2)
+#define MTL_OPERATION_RAA_SP (0x0 << 2)
+#define MTL_OPERATION_RAA_WSP (0x1 << 2)
+
#define MTL_INT_STATUS 0x00000c20
-#define MTL_INT_Q0 BIT(0)
+#define MTL_INT_QX(x) BIT(x)
+
+#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
+#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
+#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
+#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
+#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
+#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
#define MTL_CHAN_BASE_ADDR 0x00000d00
#define MTL_CHAN_BASE_OFFSET 0x40
@@ -180,6 +229,7 @@ enum power_event {
#define MTL_OP_MODE_TSF BIT(1)
#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
+#define MTL_OP_MODE_TQS_SHIFT 16
#define MTL_OP_MODE_TTC_MASK 0x70
#define MTL_OP_MODE_TTC_SHIFT 4
@@ -193,6 +243,17 @@ enum power_event {
#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
+#define MTL_OP_MODE_RQS_SHIFT 20
+
+#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
+#define MTL_OP_MODE_RFD_SHIFT 14
+
+#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
+#define MTL_OP_MODE_RFA_SHIFT 8
+
+#define MTL_OP_MODE_EHFC BIT(7)
+
#define MTL_OP_MODE_RTC_MASK 0x18
#define MTL_OP_MODE_RTC_SHIFT 3
@@ -201,6 +262,46 @@ enum power_event {
#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+/* MTL ETS Control register */
+#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
+#define MTL_ETS_CTRL_BASE_OFFSET 0x40
+#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
+ ((x) * MTL_ETS_CTRL_BASE_OFFSET))
+
+#define MTL_ETS_CTRL_CC BIT(3)
+#define MTL_ETS_CTRL_AVALG BIT(2)
+
+/* MTL Queue Quantum Weight */
+#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
+#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
+#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
+ ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
+#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
+
+/* MTL sendSlopeCredit register */
+#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
+#define MTL_SEND_SLP_CRED_OFFSET 0x40
+#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
+ ((x) * MTL_SEND_SLP_CRED_OFFSET))
+
+#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
+
+/* MTL hiCredit register */
+#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
+#define MTL_HIGH_CRED_OFFSET 0x40
+#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
+ ((x) * MTL_HIGH_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
+
+/* MTL loCredit register */
+#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
+#define MTL_LOW_CRED_OFFSET 0x40
+#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
+ ((x) * MTL_LOW_CRED_OFFSET))
+
+#define MTL_LOW_CRED_LC_MASK GENMASK(28, 0)
+
/* MTL debug */
#define MTL_DEBUG_TXSTSFSTS BIT(5)
#define MTL_DEBUG_TXFSTS BIT(4)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 1e79e6529c4a..48793f2e9307 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
writel(value, ioaddr + GMAC_INT_EN);
}
-static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
+ u8 mode, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
value &= GMAC_RX_QUEUE_CLEAR(queue);
- value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
writel(value, ioaddr + GMAC_RXQ_CTRL0);
}
+static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 base_register;
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+
+ value = readl(ioaddr + base_register);
+
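+	/* each queue owns an 8-bit PSRQ priority-selection field, split across CTRL2/CTRL3 */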
+ value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
+ value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+ writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 base_register;
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+
+ value = readl(ioaddr + base_register);
+
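+	/* each queue owns an 8-bit PSTQ field, split across PRTY_MAP0/PRTY_MAP1 */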
+ value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
+ value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
+ GMAC_TXQCTRL_PSTQX_MASK(queue);
+
+ writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
+ u8 packet, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
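+	/* routing fields live in RXQ_CTRL1; the table is indexed by the 1-based packet type */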
+ const struct stmmac_rx_routing route_possibilities[] = {
+ { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
+ { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
+ { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
+ { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
+ { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
+ };
+
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+
+ /* routing configuration */
+ value &= ~route_possibilities[packet - 1].reg_mask;
+	value |= (queue << route_possibilities[packet - 1].reg_shift) &
+ route_possibilities[packet - 1].reg_mask;
+
+ /* some packets require extra ops */
+ if (packet == PACKET_AVCPQ) {
+ value &= ~GMAC_RXQCTRL_TACPQE;
+ value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
+ } else if (packet == PACKET_MCBCQ) {
+ value &= ~GMAC_RXQCTRL_MCBCQEN;
+ value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
+ }
+
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+}
+
+static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_RAA;
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ value |= MTL_OPERATION_RAA_SP;
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= MTL_OPERATION_RAA_WSP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_SCHALG_MASK;
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= MTL_OPERATION_SCHALG_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= MTL_OPERATION_SCHALG_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= MTL_OPERATION_SCHALG_DWRR;
+ break;
+ case MTL_TX_ALGORITHM_SP:
+ value |= MTL_OPERATION_SCHALG_SP;
+ break;
+ default:
+ break;
+	}
+
+	writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+
+ value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
+ value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
+ writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+}
+
+static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (queue < 4)
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
+
+	if (queue == 0 || queue == 4) {
+		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
+		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+	} else if (queue > 4) {
+		/* queues 5-7 sit in MAP1; rebase the field index into that register */
+		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
+	} else {
+		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
+		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
+	}
+
+ if (queue < 4)
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
+}
+
+static void dwmac4_config_cbs(struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope,
+ u32 high_credit, u32 low_credit, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+	pr_debug("Queue %u configured as AVB. Parameters:\n", queue);
+ pr_debug("\tsend_slope: 0x%08x\n", send_slope);
+ pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
+ pr_debug("\thigh_credit: 0x%08x\n", high_credit);
+ pr_debug("\tlow_credit: 0x%08x\n", low_credit);
+
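+	/* AVALG selects the credit-based shaper; CC keeps accumulated credit while the queue idles */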
+ /* enable AV algorithm */
+ value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+ value |= MTL_ETS_CTRL_AVALG;
+ value |= MTL_ETS_CTRL_CC;
+ writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+
+ /* configure send slope */
+ value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
+ value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
+ writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+
+ /* configure idle slope (same register as tx weight) */
+ dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
+
+ /* configure high credit */
+ value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_HIGH_CRED_HC_MASK;
+ value |= high_credit & MTL_HIGH_CRED_HC_MASK;
+ writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+
+	/* configure low credit */
+	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+	value &= ~MTL_LOW_CRED_LC_MASK;
+	value |= low_credit & MTL_LOW_CRED_LC_MASK;
+ writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+}
+
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
void __iomem *ioaddr = hw->pcsr;
@@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
- u32 channel = STMMAC_CHAN0; /* FIXME */
unsigned int flow = 0;
+ u32 queue = 0;
pr_debug("GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
@@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
if (fc & FLOW_TX) {
pr_debug("\tTransmit Flow-Control ON\n");
- flow |= GMAC_TX_FLOW_CTRL_TFE;
- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
- if (duplex) {
+ if (duplex)
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
- flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
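+		/* program TFE on every TX queue; the pause time field only applies in full duplex */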
+ for (queue = 0; queue < tx_cnt; queue++) {
+ flow |= GMAC_TX_FLOW_CTRL_TFE;
+
+ if (duplex)
+ flow |=
+ (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
}
}
@@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
}
}
+static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ int ret = 0;
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+
+ /* Check MTL Interrupt */
+ if (mtl_int_qx_status & MTL_INT_QX(chan)) {
+ /* read Queue x Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(chan));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ return ret;
+}
+
static int dwmac4_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
- u32 mtl_int_qx_status;
u32 intr_status;
int ret = 0;
@@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
- mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
- /* Check MTL Interrupt: Currently only one queue is used: Q0. */
- if (mtl_int_qx_status & MTL_INT_Q0) {
- /* read Queue 0 Interrupt status */
- u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
-
- if (status & MTL_RX_OVERFLOW_INT) {
- /* clear Interrupt */
- writel(status | MTL_RX_OVERFLOW_INT,
- ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
- ret = CORE_IRQ_MTL_RX_OVERFLOW;
- }
- }
-
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
if (intr_status & PCS_RGSMIIIS_IRQ)
dwmac4_phystatus(ioaddr, x);
@@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
{
u32 value;
-
- /* Currently only channel 0 is supported */
- value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
-
- if (value & MTL_DEBUG_TXSTSFSTS)
- x->mtl_tx_status_fifo_full++;
- if (value & MTL_DEBUG_TXFSTS)
- x->mtl_tx_fifo_not_empty++;
- if (value & MTL_DEBUG_TWCSTS)
- x->mmtl_fifo_ctrl++;
- if (value & MTL_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
- >> MTL_DEBUG_TRCSTS_SHIFT;
- if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
- x->mtl_tx_fifo_read_ctrl_write++;
- else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
- x->mtl_tx_fifo_read_ctrl_wait++;
- else if (trcsts == MTL_DEBUG_TRCSTS_READ)
- x->mtl_tx_fifo_read_ctrl_read++;
- else
- x->mtl_tx_fifo_read_ctrl_idle++;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
}
- if (value & MTL_DEBUG_TXPAUSED)
- x->mac_tx_in_pause++;
- value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+ for (queue = 0; queue < rx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
- if (value & MTL_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
- >> MTL_DEBUG_RRCSTS_SHIFT;
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+ >> MTL_DEBUG_RRCSTS_SHIFT;
- if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
- x->mtl_rx_fifo_fill_level_full++;
- else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
- x->mtl_rx_fifo_fill_above_thresh++;
- else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
- x->mtl_rx_fifo_fill_below_thresh++;
- else
- x->mtl_rx_fifo_fill_level_empty++;
- }
- if (value & MTL_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
- MTL_DEBUG_RRCSTS_SHIFT;
-
- if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
- x->mtl_rx_fifo_read_ctrl_flush++;
- else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
- x->mtl_rx_fifo_read_ctrl_read_data++;
- else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
- x->mtl_rx_fifo_read_ctrl_status++;
- else
- x->mtl_rx_fifo_read_ctrl_idle++;
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
}
- if (value & MTL_DEBUG_RWCSTS)
- x->mtl_rx_fifo_ctrl_active++;
/* GMAC debug */
value = readl(ioaddr + GMAC_DEBUG);
@@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
static const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
+ .set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+	.rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
.dump_regs = dwmac4_dump_regs,
.host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .set_eee_mode = dwmac4_set_eee_mode,
+ .reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_timer = dwmac4_set_eee_timer,
+ .set_eee_pls = dwmac4_set_eee_pls,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+};
+
+static const struct stmmac_ops dwmac410_ops = {
+ .core_init = dwmac4_core_init,
+ .set_mac = stmmac_dwmac4_set_mac,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+	.rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
.flow_ctrl = dwmac4_flow_ctrl,
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
@@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->mac = &dwmac4_ops;
-
mac->link.port = GMAC_CONFIG_PS;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed = GMAC_CONFIG_FES;
@@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
else
mac->dma = &dwmac4_dma_ops;
+ if (*synopsys_id >= DWMAC_CORE_4_00)
+ mac->mac = &dwmac410_ops;
+ else
+ mac->mac = &dwmac4_ops;
+
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 843ec69222ea..aa6476439aee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -304,12 +304,13 @@ static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls)
+ bool ls, unsigned int tot_pkt_len)
{
unsigned int tdes3 = le32_to_cpu(p->des3);
p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
+ tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
if (is_fs)
tdes3 |= TDES3_FIRST_DESCRIPTOR;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index f97b0d5d9987..eec8463057fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 dma_rx_phy,
- u32 channel)
+void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
{
u32 value;
- int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
- int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
- /* set PBL for each channels. Currently we affect same configuration
- * on each channel
- */
- value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
- if (dma_cfg->pblx8)
- value = value | DMA_BUS_MODE_PBL;
- writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+}
- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ u32 value;
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
- value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
- value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+}
- /* Mask interrupts by writing to CSR7 */
- writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value;
+
+ /* common channel control register config */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (dma_cfg->pblx8)
+ value = value | DMA_BUS_MODE_PBL;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
- writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
- writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK,
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac4_dma_init(void __iomem *ioaddr,
@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
- int i;
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
value |= DMA_SYS_BUS_AAL;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
-
- for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
- dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
}
static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
@@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
}
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
{
- int i;
+ u32 chan;
- for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
- writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+ for (chan = 0; chan < number_chan; chan++)
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
}
-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
- int rxmode, u32 channel)
+static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz)
{
- u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
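+	/* RQS encodes the per-queue RX FIFO size in 256-byte units, minus one */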
+ unsigned int rqs = fifosz / 256 - 1;
+ u32 mtl_rx_op, mtl_rx_int;
- /* Following code only done for channel 0, other channels not yet
- * supported.
- */
- mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ mtl_rx_op |= MTL_OP_MODE_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ mtl_rx_op &= ~MTL_OP_MODE_RSF;
+ mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ if (mode <= 32)
+ mtl_rx_op |= MTL_OP_MODE_RTC_32;
+ else if (mode <= 64)
+ mtl_rx_op |= MTL_OP_MODE_RTC_64;
+ else if (mode <= 96)
+ mtl_rx_op |= MTL_OP_MODE_RTC_96;
+ else
+ mtl_rx_op |= MTL_OP_MODE_RTC_128;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
+ mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+
+ /* enable flow control only if each channel gets 4 KiB or more FIFO */
+ if (fifosz >= 4096) {
+ unsigned int rfd, rfa;
+
+ mtl_rx_op |= MTL_OP_MODE_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
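+	/* the RFD/RFA fields encode "FIFO full minus (1 KiB + N * 512 B)", per the labels below */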
+ switch (fifosz) {
+ case 4096:
+ /* This violates the above formula because of FIFO size
+ * limit therefore overflow may occur in spite of this.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ case 8192:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x0a; /* Full-6K */
+ break;
+
+ case 16384:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ break;
+
+ default:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x1e; /* Full-16K */
+ break;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
+ mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
- if (txmode == SF_DMA_MODE) {
+ mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
+ mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+ }
+
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+	/* Enable the MTL RX overflow interrupt */
+ mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+ writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+ ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
+static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+ u32 channel)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
mtl_tx_op |= MTL_OP_MODE_TSF;
} else {
- pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
mtl_tx_op &= ~MTL_OP_MODE_TSF;
mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
/* Set the transmit threshold */
- if (txmode <= 32)
+ if (mode <= 32)
mtl_tx_op |= MTL_OP_MODE_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
mtl_tx_op |= MTL_OP_MODE_TTC_64;
- else if (txmode <= 96)
+ else if (mode <= 96)
mtl_tx_op |= MTL_OP_MODE_TTC_96;
- else if (txmode <= 128)
+ else if (mode <= 128)
mtl_tx_op |= MTL_OP_MODE_TTC_128;
- else if (txmode <= 192)
+ else if (mode <= 192)
mtl_tx_op |= MTL_OP_MODE_TTC_192;
- else if (txmode <= 256)
+ else if (mode <= 256)
mtl_tx_op |= MTL_OP_MODE_TTC_256;
- else if (txmode <= 384)
+ else if (mode <= 384)
mtl_tx_op |= MTL_OP_MODE_TTC_384;
else
mtl_tx_op |= MTL_OP_MODE_TTC_512;
@@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
*/
mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
-
- mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
- if (rxmode == SF_DMA_MODE) {
- pr_debug("GMAC: enable RX store and forward mode\n");
- mtl_rx_op |= MTL_OP_MODE_RSF;
- } else {
- pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
- mtl_rx_op &= ~MTL_OP_MODE_RSF;
- mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
- if (rxmode <= 32)
- mtl_rx_op |= MTL_OP_MODE_RTC_32;
- else if (rxmode <= 64)
- mtl_rx_op |= MTL_OP_MODE_RTC_64;
- else if (rxmode <= 96)
- mtl_rx_op |= MTL_OP_MODE_RTC_96;
- else
- mtl_rx_op |= MTL_OP_MODE_RTC_128;
- }
-
- writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
- /* Enable MTL RX overflow */
- mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
- writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
- ioaddr + MTL_CHAN_INT_CTRL(channel));
-}
-
-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
-{
- /* Only Channel 0 is actually configured and used */
- dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
}
static void dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
+ * shifting and store the sizes in bytes.
+ */
+ dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
/* MAC HW feature2 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
/* TX and RX number of channels */
@@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
+ .init_chan = dwmac4_dma_init_channel,
+ .init_rx_chan = dwmac4_dma_init_rx_chan,
+ .init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
.dump_regs = dwmac4_dump_dma_regs,
- .dma_mode = dwmac4_dma_operation_mode,
+ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
.enable_dma_irq = dwmac4_enable_dma_irq,
.disable_dma_irq = dwmac4_disable_dma_irq,
.start_tx = dwmac4_dma_start_tx,
@@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
const struct stmmac_dma_ops dwmac410_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
+ .init_chan = dwmac4_dma_init_channel,
+ .init_rx_chan = dwmac4_dma_init_rx_chan,
+ .init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
.dump_regs = dwmac4_dump_dma_regs,
- .dma_mode = dwmac4_dma_operation_mode,
+ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
.enable_dma_irq = dwmac410_enable_dma_irq,
.disable_dma_irq = dwmac4_disable_dma_irq,
.start_tx = dwmac4_dma_start_tx,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 1b06df749e2b..8474bf961dd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -185,17 +185,17 @@
int dwmac4_dma_reset(void __iomem *ioaddr);
void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr);
-void dwmac4_dma_start_tx(void __iomem *ioaddr);
-void dwmac4_dma_stop_tx(void __iomem *ioaddr);
-void dwmac4_dma_start_rx(void __iomem *ioaddr);
-void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac4_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+ struct stmmac_extra_stats *x, u32 chan);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index c7326d5b2f43..49f5687879df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
{
- writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
}
void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
{
- writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
}
-void dwmac4_dma_start_tx(void __iomem *ioaddr)
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_TE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value &= ~GMAC_CONFIG_TE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_start_rx(void __iomem *ioaddr)
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_RE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value &= ~GMAC_CONFIG_RE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
- writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
}
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
- writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
}
-void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
- DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ DMA_CHAN_INTR_ENA(chan));
}
-void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
- ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
{
- writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x)
+ struct stmmac_extra_stats *x, u32 chan)
{
int ret = 0;
- u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
u32 value;
- value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
* status [21-0] except reserved bits [5-3]
*/
writel((intr_status & 0x3fffc7),
- ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+ ioaddr + DMA_CHAN_STATUS(chan));
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 56e485f79077..9091df86723a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -137,13 +137,14 @@
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr);
-void dwmac_disable_dma_irq(void __iomem *ioaddr);
-void dwmac_dma_start_tx(void __iomem *ioaddr);
-void dwmac_dma_stop_tx(void __iomem *ioaddr);
-void dwmac_dma_start_rx(void __iomem *ioaddr);
-void dwmac_dma_stop_rx(void __iomem *ioaddr);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 chan);
int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e60bfca2a763..38f94305aab5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(void __iomem *ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(void __iomem *ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(0, ioaddr + DMA_INTR_ENA);
}
-void dwmac_dma_start_tx(void __iomem *ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_tx(void __iomem *ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_start_rx(void __iomem *ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_rx(void __iomem *ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
#endif
int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x)
+ struct stmmac_extra_stats *x, u32 chan)
{
int ret = 0;
/* read the status register (CSR5) */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 323b59ec74a3..7546b3664113 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls)
+ bool ls, unsigned int tot_pkt_len)
{
unsigned int tdes0 = le32_to_cpu(p->des0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index efb818ebd55e..f817f8f36569 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls)
+ bool ls, unsigned int tot_pkt_len)
{
unsigned int tdes1 = le32_to_cpu(p->des1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 452f256ff03f..28e4b5d50ce6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -26,16 +26,17 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int entry = priv->cur_tx;
- struct dma_desc *desc;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->cur_tx;
unsigned int bmax, len, des2;
+ struct dma_desc *desc;
if (priv->extend_desc)
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
@@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = bmax;
- priv->tx_skbuff_dma[entry].is_jumbo = true;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
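+		/* des3 holds the buffer-2 address: the second 4 KiB of the mapping */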
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
- STMMAC_RING_MODE, 0, false);
- priv->tx_skbuff[entry] = NULL;
+ STMMAC_RING_MODE, 0,
+ false, skb->len);
+ tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (priv->extend_desc)
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
des2 = dma_map_single(priv->device, skb->data + bmax, len,
DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = len;
- priv->tx_skbuff_dma[entry].is_jumbo = true;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_RING_MODE, 1, true);
+ STMMAC_RING_MODE, 1,
+ true, skb->len);
} else {
des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = nopaged_len;
- priv->tx_skbuff_dma[entry].is_jumbo = true;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 0, true);
+ STMMAC_RING_MODE, 0,
+ true, skb->len);
}
- priv->cur_tx = entry;
+ tx_q->cur_tx = entry;
return entry;
}
@@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- unsigned int entry = priv->dirty_tx;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->dirty_tx;
/* des3 is only used for jumbo frames tx or time stamping */
- if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
- (priv->tx_skbuff_dma[entry].last_segment &&
+ if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
+ (tx_q->tx_skbuff_dma[entry].last_segment &&
!priv->extend_desc && priv->hwts_tx_en)))
p->des3 = 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cd8fb619b1e9..33efe7038cab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -46,38 +46,51 @@ struct stmmac_tx_info {
bool is_jumbo;
};
-struct stmmac_priv {
- /* Frequently used values are kept adjacent for cache effect */
+/* Frequently used values are kept adjacent for cache effect */
+struct stmmac_tx_queue {
+ u32 queue_index;
+ struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
+ struct stmmac_tx_info *tx_skbuff_dma;
unsigned int cur_tx;
unsigned int dirty_tx;
+ dma_addr_t dma_tx_phy;
+ u32 tx_tail_addr;
+};
+
+struct stmmac_rx_queue {
+ u32 queue_index;
+ struct stmmac_priv *priv_data;
+ struct dma_extended_desc *dma_erx;
+ struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_skbuff_dma;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ u32 rx_zeroc_thresh;
+ dma_addr_t dma_rx_phy;
+ u32 rx_tail_addr;
+ struct napi_struct napi ____cacheline_aligned_in_smp;
+};
+
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
- struct stmmac_tx_info *tx_skbuff_dma;
- dma_addr_t dma_tx_phy;
+
int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
struct timer_list txtimer;
bool tso;
- struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
- struct dma_extended_desc *dma_erx;
- struct sk_buff **rx_skbuff;
- unsigned int cur_rx;
- unsigned int dirty_rx;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
- unsigned int rx_zeroc_thresh;
u32 rx_riwt;
int hwts_rx_en;
- dma_addr_t *rx_skbuff_dma;
- dma_addr_t dma_rx_phy;
-
- struct napi_struct napi ____cacheline_aligned_in_smp;
void __iomem *ioaddr;
struct net_device *dev;
@@ -85,6 +98,12 @@ struct stmmac_priv {
struct mac_device_info *hw;
spinlock_t lock;
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+
int oldlink;
int speed;
int oldduplex;
@@ -119,8 +138,6 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
- u32 rx_tail_addr;
- u32 tx_tail_addr;
u32 mss;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 85d64114e159..16808e48ca1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
struct phy_device *phy = netdev->phydev;
int new_pause = FLOW_OFF;
@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
- priv->pause);
+ priv->pause, tx_cnt);
return 0;
}
@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
int i, j = 0;
/* Update the DMA HW counters for dwmac10/100 */
@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
if ((priv->hw->mac->debug) &&
(priv->synopsys_id >= DWMAC_CORE_3_50))
priv->hw->mac->debug(priv->ioaddr,
- (void *)&priv->xstats);
+ (void *)&priv->xstats,
+ rx_queues_count, tx_queues_count);
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int rx_riwt;
/* Check not supported parameters */
@@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
priv->tx_coal_frames = ec->tx_max_coalesced_frames;
priv->tx_coal_timer = ec->tx_coalesce_usecs;
priv->rx_riwt = rx_riwt;
- priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4498a3861aa3..cd8c60132390 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -139,6 +139,64 @@ static void stmmac_verify_args(void)
}
/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ napi_disable(&rx_q->napi);
+ }
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ napi_enable(&rx_q->napi);
+ }
+}
+
+/**
+ * stmmac_stop_all_queues - Stop all queues
+ * @priv: driver private structure
+ */
+static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+{
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_cnt; queue++)
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
+ * stmmac_start_all_queues - Start all queues
+ * @priv: driver private structure
+ */
+static void stmmac_start_all_queues(struct stmmac_priv *priv)
+{
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_cnt; queue++)
+ netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
* stmmac_clk_csr_set - dynamically set the MDC clock
* @priv: driver private structure
* Description: this is to dynamically set the MDC clock according to the csr
@@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf, int len)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
-static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
u32 avail;
- if (priv->dirty_tx > priv->cur_tx)
- avail = priv->dirty_tx - priv->cur_tx - 1;
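+	/* free slots between consumer (dirty_tx) and producer (cur_tx), modulo the ring size */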
+ if (tx_q->dirty_tx > tx_q->cur_tx)
+ avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
+ avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+/**
+ * stmmac_rx_dirty - Get RX queue dirty
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
u32 dirty;
- if (priv->dirty_rx <= priv->cur_rx)
- dirty = priv->cur_rx - priv->dirty_rx;
+ if (rx_q->dirty_rx <= rx_q->cur_rx)
+ dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
+ dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
*/
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* check if all TX queues have the work finished */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ return; /* still unfinished work */
+ }
+
/* Check and enter in LPI mode */
- if ((priv->dirty_tx == priv->cur_tx) &&
- (priv->tx_path_in_lpi_mode == false))
+ if (!priv->tx_path_in_lpi_mode)
priv->hw->mac->set_eee_mode(priv->hw,
priv->plat->en_tx_lpi_clockgating);
}
@@ -673,6 +748,19 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
}
/**
+ * stmmac_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+ priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
+}
+
+/**
* stmmac_adjust_link - adjusts the link parameters
* @dev: net device structure
* Description: this is the helper called by the physical abstraction layer
@@ -687,7 +775,6 @@ static void stmmac_adjust_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
- unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
if (!phydev)
return;
@@ -709,8 +796,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/* Flow Control operation */
if (phydev->pause)
- priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
- fc, pause_time);
+ stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) {
new_state = 1;
@@ -878,22 +964,56 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
- void *head_rx, *head_tx;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ void *head_rx;
+ u32 queue;
- if (priv->extend_desc) {
- head_rx = (void *)priv->dma_erx;
- head_tx = (void *)priv->dma_etx;
- } else {
- head_rx = (void *)priv->dma_rx;
- head_tx = (void *)priv->dma_tx;
+ /* Display RX rings */
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ pr_info("\tRX Queue %u rings\n", queue);
+
+ if (priv->extend_desc)
+ head_rx = (void *)rx_q->dma_erx;
+ else
+ head_rx = (void *)rx_q->dma_rx;
+
+ /* Display RX ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ void *head_tx;
+ u32 queue;
+
+ /* Display TX rings */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		pr_info("\tTX Queue %u rings\n", queue);
+
+ if (priv->extend_desc)
+ head_tx = (void *)tx_q->dma_etx;
+ else
+ head_tx = (void *)tx_q->dma_tx;
+
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ /* Display RX ring */
+ stmmac_display_rx_rings(priv);
- /* Display Rx ring */
- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
- /* Display Tx ring */
- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ /* Display TX ring */
+ stmmac_display_tx_rings(priv);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -913,48 +1033,88 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
}
/**
- * stmmac_clear_descriptors - clear descriptors
+ * stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
- * Description: this function is called to clear the tx and rx descriptors
+ * @queue: RX queue index
+ * Description: this function is called to clear the RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int i;
- /* Clear the Rx/Tx descriptors */
+ /* Clear the RX descriptors */
for (i = 0; i < DMA_RX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+ priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
(i == DMA_RX_SIZE - 1));
else
- priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+ priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
(i == DMA_RX_SIZE - 1));
+}
+
+/**
+ * stmmac_clear_tx_descriptors - clear tx descriptors
+ * @priv: driver private structure
+ * @queue: TX queue index.
+ * Description: this function is called to clear the TX descriptors
+ * in case of both basic and extended descriptors are used.
+ */
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ /* Clear the TX descriptors */
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
priv->mode,
(i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
priv->mode,
(i == DMA_TX_SIZE - 1));
}
/**
+ * stmmac_clear_descriptors - clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors
+ * in case of both basic and extended descriptors are used.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Clear the RX descriptors */
+ for (queue = 0; queue < rx_queue_cnt; queue++)
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Clear the TX descriptors */
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ stmmac_clear_tx_descriptors(priv, queue);
+}
+
+/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
* @p: descriptor pointer
* @i: descriptor index
- * @flags: gfp flag.
+ * @flags: gfp flag
+ * @queue: RX queue index
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- int i, gfp_t flags)
+ int i, gfp_t flags, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct sk_buff *skb;
skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
@@ -963,20 +1123,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
"%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
- priv->rx_skbuff[i] = skb;
- priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ rx_q->rx_skbuff[i] = skb;
+ rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+ if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
dev_kfree_skb_any(skb);
return -EINVAL;
}
if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+ p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
else
- p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+ p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -985,30 +1145,71 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return 0;
}
-static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+/**
+ * stmmac_free_rx_buffer - free RX dma buffer
+ * @priv: private structure
+ * @queue: RX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
- if (priv->rx_skbuff[i]) {
- dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ if (rx_q->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
+ dev_kfree_skb_any(rx_q->rx_skbuff[i]);
}
- priv->rx_skbuff[i] = NULL;
+ rx_q->rx_skbuff[i] = NULL;
}
/**
- * init_dma_desc_rings - init the RX/TX descriptor rings
+ * stmmac_free_tx_buffer - free TX dma buffer
+ * @priv: private structure
+ * @queue: TX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->tx_skbuff_dma[i].buf) {
+ if (tx_q->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ }
+
+ if (tx_q->tx_skbuff[i]) {
+ dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+ tx_q->tx_skbuff[i] = NULL;
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ }
+}
+
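The free path above must mirror how each buffer was mapped: fragments mapped with skb_frag_dma_map() need dma_unmap_page(), while linear data mapped with dma_map_single() needs dma_unmap_single(). The per-entry record that makes this possible, simplified to the fields the patch actually uses (illustrative; see stmmac.h for the real struct):

	struct stmmac_tx_info {
		dma_addr_t buf;		/* DMA address written into the descriptor */
		unsigned int len;	/* mapped length, required for unmapping */
		bool map_as_page;	/* skb_frag_dma_map() vs dma_map_single() */
		bool last_segment;	/* entry closes a frame */
		bool is_jumbo;		/* jumbo-frame path bookkeeping */
	};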
+/**
+ * init_dma_rx_desc_rings - init the RX descriptor rings
* @dev: net device structure
* @flags: gfp flag.
- * Description: this function initializes the DMA RX/TX descriptors
+ * Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
- int i;
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
unsigned int bfsize = 0;
int ret = -ENOMEM;
+ u32 queue;
+ int i;
if (priv->hw->mode->set_16kib_bfsize)
bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1018,235 +1219,409 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
priv->dma_buf_sz = bfsize;
- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
- __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
-
/* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
"SKB addresses:\nskb\t\tskb data\tdma data\n");
- for (i = 0; i < DMA_RX_SIZE; i++) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_erx + i)->basic);
- else
- p = priv->dma_rx + i;
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- ret = stmmac_init_rx_buffers(priv, p, i, flags);
- if (ret)
- goto err_init_rx_buffers;
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
- priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
- (unsigned int)priv->rx_skbuff_dma[i]);
+ for (i = 0; i < DMA_RX_SIZE; i++) {
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
+ goto err_init_rx_buffers;
+
+ netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+ rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+ (unsigned int)rx_q->rx_skbuff_dma[i]);
+ }
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ priv->hw->mode->init(rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ DMA_RX_SIZE, 1);
+ else
+ priv->hw->mode->init(rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ DMA_RX_SIZE, 0);
+ }
}
- priv->cur_rx = 0;
- priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
buf_sz = bfsize;
- /* Setup the chained descriptor addresses */
- if (priv->mode == STMMAC_CHAIN_MODE) {
- if (priv->extend_desc) {
- priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
- DMA_RX_SIZE, 1);
- priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
- DMA_TX_SIZE, 1);
- } else {
- priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
- DMA_RX_SIZE, 0);
- priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
- DMA_TX_SIZE, 0);
- }
+ return 0;
+
+err_init_rx_buffers:
+ while (queue >= 0) {
+ while (--i >= 0)
+ stmmac_free_rx_buffer(priv, queue, i);
+
+ if (queue == 0)
+ break;
+
+ i = DMA_RX_SIZE;
+ queue--;
}
- /* TX INITIALIZATION */
- for (i = 0; i < DMA_TX_SIZE; i++) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_etx + i)->basic);
- else
- p = priv->dma_tx + i;
+ return ret;
+}
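A note on the err_init_rx_buffers unwind: queue is u32, so the while (queue >= 0) test is always true and the loop really terminates through the explicit queue == 0 break. The equivalent logic with a signed index, shown only to make the unwind order explicit (hypothetical helper name; it frees the partially filled queue first, then every fully filled one):

	static void unwind_rx_buffers(struct stmmac_priv *priv, int failed_q,
				      int failed_i)
	{
		int q, i;

		for (q = failed_q; q >= 0; q--) {
			/* the failing queue stops at its failing entry,
			 * earlier queues were filled completely
			 */
			i = (q == failed_q) ? failed_i : DMA_RX_SIZE;
			while (--i >= 0)
				stmmac_free_rx_buffer(priv, q, i);
		}
	}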
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- p->des0 = 0;
- p->des1 = 0;
- p->des2 = 0;
- p->des3 = 0;
- } else {
- p->des2 = 0;
+/**
+ * init_dma_tx_desc_rings - init the TX descriptor rings
+ * @dev: net device structure.
+ * Description: this function initializes the DMA TX descriptors
+ * and resets the per-queue bookkeeping. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+ int i;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ priv->hw->mode->init(tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ DMA_TX_SIZE, 1);
+ else
+ priv->hw->mode->init(tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ DMA_TX_SIZE, 0);
+ }
+
+ for (i = 0; i < DMA_TX_SIZE; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ } else {
+ p->des2 = 0;
+ }
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
}
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- priv->tx_skbuff_dma[i].len = 0;
- priv->tx_skbuff_dma[i].last_segment = false;
- priv->tx_skbuff[i] = NULL;
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
- priv->dirty_tx = 0;
- priv->cur_tx = 0;
- netdev_reset_queue(priv->dev);
+ return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_rx_desc_rings(dev, flags);
+ if (ret)
+ return ret;
+
+ ret = init_dma_tx_desc_rings(dev);
stmmac_clear_descriptors(priv);
if (netif_msg_hw(priv))
stmmac_display_rings(priv);
- return 0;
-err_init_rx_buffers:
- while (--i >= 0)
- stmmac_free_rx_buffers(priv, i);
return ret;
}
-static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
for (i = 0; i < DMA_RX_SIZE; i++)
- stmmac_free_rx_buffers(priv, i);
+ stmmac_free_rx_buffer(priv, queue, i);
}
-static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_TX_SIZE; i++) {
- if (priv->tx_skbuff_dma[i].buf) {
- if (priv->tx_skbuff_dma[i].map_as_page)
- dma_unmap_page(priv->device,
- priv->tx_skbuff_dma[i].buf,
- priv->tx_skbuff_dma[i].len,
- DMA_TO_DEVICE);
- else
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i].buf,
- priv->tx_skbuff_dma[i].len,
- DMA_TO_DEVICE);
- }
+ for (i = 0; i < DMA_TX_SIZE; i++)
+ stmmac_free_tx_buffer(priv, queue, i);
+}
- if (priv->tx_skbuff[i]) {
- dev_kfree_skb_any(priv->tx_skbuff[i]);
- priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- }
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ /* Free RX queue resources */
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ /* Release the DMA RX socket buffers */
+ dma_free_rx_skbufs(priv, queue);
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_desc),
+ rx_q->dma_rx, rx_q->dma_rx_phy);
+ else
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
+ sizeof(struct dma_extended_desc),
+ rx_q->dma_erx, rx_q->dma_rx_phy);
+
+ kfree(rx_q->rx_skbuff_dma);
+ kfree(rx_q->rx_skbuff);
}
}
/**
- * alloc_dma_desc_resources - alloc TX/RX resources.
+ * free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue = 0;
+
+ /* Free TX queue resources */
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ /* Release the DMA TX socket buffers */
+ dma_free_tx_skbufs(priv, queue);
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_desc),
+ tx_q->dma_tx, tx_q->dma_tx_phy);
+ else
+ dma_free_coherent(priv->device, DMA_TX_SIZE *
+ sizeof(struct dma_extended_desc),
+ tx_q->dma_etx, tx_q->dma_tx_phy);
+
+ kfree(tx_q->tx_skbuff_dma);
+ kfree(tx_q->tx_skbuff);
+ }
+}
+
+/**
+ * alloc_dma_rx_desc_resources - alloc RX resources.
* @priv: private structure
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for the RX path; it pre-allocates
* the RX socket buffers in order to allow the zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
+ u32 rx_count = priv->plat->rx_queues_to_use;
int ret = -ENOMEM;
+ u32 queue;
- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- return -ENOMEM;
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
- sizeof(*priv->tx_skbuff_dma),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
- if (priv->extend_desc) {
- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
+ rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+ sizeof(dma_addr_t),
GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
+ if (!rx_q->rx_skbuff_dma)
+ return -ENOMEM;
- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+ rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!rx_q->rx_skbuff)
goto err_dma;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+ DMA_RX_SIZE *
+ sizeof(struct
+ dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_erx)
+ goto err_dma;
+
+ } else {
+ rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+ DMA_RX_SIZE *
+ sizeof(struct
+ dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ goto err_dma;
}
}
return 0;
err_dma:
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
+ free_dma_rx_desc_resources(priv);
+
return ret;
}
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+/**
+ * alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the TX path, i.e. the
+ * descriptor rings and the tx_skbuff/tx_skbuff_dma bookkeeping arrays
+ * of each TX queue.
+ */
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
- /* Release the DMA TX/RX socket buffers */
- dma_free_rx_skbufs(priv);
- dma_free_tx_skbufs(priv);
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc) {
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ int ret = -ENOMEM;
+ u32 queue;
+
+ /* TX queues buffers and DMA */
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ tx_q->queue_index = queue;
+ tx_q->priv_data = priv;
+
+ tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff_dma)
+ return -ENOMEM;
+
+ tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff)
+ goto err_dma_buffers;
+
+ if (priv->extend_desc) {
+ tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+ DMA_TX_SIZE *
+ sizeof(struct
+ dma_extended_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
+ if (!tx_q->dma_etx)
+ goto err_dma_buffers;
+ } else {
+ tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+ DMA_TX_SIZE *
+ sizeof(struct
+ dma_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
+ if (!tx_q->dma_tx)
+ goto err_dma_buffers;
+ }
}
- kfree(priv->rx_skbuff_dma);
- kfree(priv->rx_skbuff);
- kfree(priv->tx_skbuff_dma);
- kfree(priv->tx_skbuff);
+
+ return 0;
+
+err_dma_buffers:
+ free_dma_tx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocated the RX socket buffer in order to
+ * allow zero-copy mechanism.
+ */
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* RX Allocation */
+ int ret = alloc_dma_rx_desc_resources(priv);
+
+ if (ret)
+ return ret;
+
+ ret = alloc_dma_tx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA RX socket buffers */
+ free_dma_rx_desc_resources(priv);
+
+ /* Release the DMA TX socket buffers */
+ free_dma_tx_desc_resources(priv);
}
/**
@@ -1256,19 +1631,104 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
- int rx_count = priv->dma_cap.number_rx_queues;
- int queue = 0;
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
- /* If GMAC does not have multiple queues, then this is not necessary*/
- if (rx_count == 1)
- return;
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+ }
+}
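Each RX queue is now enabled with its configured operating mode instead of unconditionally enabling queue 0 only. A hypothetical example of the platform data this loop consumes (MTL_QUEUE_DCB/MTL_QUEUE_AVB are the queue-mode values this series defines in include/linux/stmmac.h):

	static void example_rx_queue_cfg(struct plat_stmmacenet_data *plat)
	{
		plat->rx_queues_to_use = 2;
		/* queue 0 carries best-effort traffic */
		plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
		/* queue 1 is reserved for AVB streams */
		plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	}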
- /**
- * If the core is synthesized with multiple rx queues / multiple
- * dma channels, then rx queues will be disabled by default.
- * For now only rx queue 0 is enabled.
- */
- priv->hw->mac->rx_queue_enable(priv->hw, queue);
+/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts an RX DMA channel
+ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ priv->hw->dma->start_rx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ priv->hw->dma->start_tx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops an RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_rx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_tx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_start_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
}
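The start/stop helpers iterate the queue counts directly because, as of this series, DMA channels map 1:1 onto queues (rx_queues_to_use and tx_queues_to_use double as channel counts). The iteration idiom generalizes to a sketch like the following (hypothetical helper, for illustration only):

	static void for_each_rx_chan(struct stmmac_priv *priv,
				     void (*op)(struct stmmac_priv *, u32))
	{
		u32 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			op(priv, chan);
	}

so that, for example, for_each_rx_chan(priv, stmmac_start_rx_dma) starts every RX channel.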
/**
@@ -1279,11 +1739,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
int rxfifosz = priv->plat->rx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
- if (priv->plat->force_thresh_dma_mode)
- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
- else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+ rxmode = tc;
+ } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
@@ -1291,37 +1760,53 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES.
*/
- priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
- rxfifosz);
+ txmode = SF_DMA_MODE;
+ rxmode = SF_DMA_MODE;
priv->xstats.threshold = SF_DMA_MODE;
- } else
- priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
+ } else {
+ txmode = tc;
+ rxmode = SF_DMA_MODE;
+ }
+
+ /* configure all channels */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ for (chan = 0; chan < rx_channels_count; chan++)
+ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+ rxfifosz);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
rxfifosz);
+ }
}
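The operating-mode policy itself is unchanged from the single-queue code; the new part is that DWMAC4 cores program it per channel. A hypothetical helper equivalent to the inline selection above (tc is the driver-global threshold variable, SF_DMA_MODE the store-and-forward value):

	static void stmmac_pick_modes(struct stmmac_priv *priv,
				      u32 *txmode, u32 *rxmode)
	{
		if (priv->plat->force_thresh_dma_mode) {
			*txmode = tc;		/* threshold mode both ways */
			*rxmode = tc;
		} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
			*txmode = SF_DMA_MODE;	/* SF is required for TX COE in HW */
			*rxmode = SF_DMA_MODE;
		} else {
			*txmode = tc;		/* threshold TX, store-and-forward RX */
			*rxmode = SF_DMA_MODE;
		}
	}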
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
+ * @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
-static void stmmac_tx_clean(struct stmmac_priv *priv)
+static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry = priv->dirty_tx;
+ unsigned int entry = tx_q->dirty_tx;
netif_tx_lock(priv->dev);
priv->xstats.tx_clean++;
- while (entry != priv->cur_tx) {
- struct sk_buff *skb = priv->tx_skbuff[entry];
+ while (entry != tx_q->cur_tx) {
+ struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
int status;
if (priv->extend_desc)
- p = (struct dma_desc *)(priv->dma_etx + entry);
+ p = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- p = priv->dma_tx + entry;
+ p = tx_q->dma_tx + entry;
status = priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
@@ -1342,48 +1827,51 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
stmmac_get_tx_hwtstamp(priv, p, skb);
}
- if (likely(priv->tx_skbuff_dma[entry].buf)) {
- if (priv->tx_skbuff_dma[entry].map_as_page)
+ if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
+ if (tx_q->tx_skbuff_dma[entry].map_as_page)
dma_unmap_page(priv->device,
- priv->tx_skbuff_dma[entry].buf,
- priv->tx_skbuff_dma[entry].len,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[entry].buf,
- priv->tx_skbuff_dma[entry].len,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry].buf = 0;
- priv->tx_skbuff_dma[entry].len = 0;
- priv->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->tx_skbuff_dma[entry].len = 0;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
}
if (priv->hw->mode->clean_desc3)
- priv->hw->mode->clean_desc3(priv, p);
+ priv->hw->mode->clean_desc3(tx_q, p);
- priv->tx_skbuff_dma[entry].last_segment = false;
- priv->tx_skbuff_dma[entry].is_jumbo = false;
+ tx_q->tx_skbuff_dma[entry].last_segment = false;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
if (likely(skb != NULL)) {
pkts_compl++;
bytes_compl += skb->len;
dev_consume_skb_any(skb);
- priv->tx_skbuff[entry] = NULL;
+ tx_q->tx_skbuff[entry] = NULL;
}
priv->hw->desc->release_tx_desc(p, priv->mode);
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
}
- priv->dirty_tx = entry;
+ tx_q->dirty_tx = entry;
- netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+ netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+ pkts_compl, bytes_compl);
+
+ if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+ queue))) &&
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
- if (unlikely(netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
netif_dbg(priv, tx_done, priv->dev,
"%s: restart transmit\n", __func__);
- netif_wake_queue(priv->dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
}
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
@@ -1393,45 +1881,76 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
netif_tx_unlock(priv->dev);
}
-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
- priv->hw->dma->enable_dma_irq(priv->ioaddr);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
}
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
- priv->hw->dma->disable_dma_irq(priv->ioaddr);
+ priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
}
/**
* stmmac_tx_err - to manage the tx error
* @priv: driver private structure
+ * @chan: channel index
* Description: it cleans the descriptors and restarts the transmission
* in case of transmission errors.
*/
-static void stmmac_tx_err(struct stmmac_priv *priv)
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int i;
- netif_stop_queue(priv->dev);
- priv->hw->dma->stop_tx(priv->ioaddr);
- dma_free_tx_skbufs(priv);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
+
+ stmmac_stop_tx_dma(priv, chan);
+ dma_free_tx_skbufs(priv, chan);
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
priv->mode,
(i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
priv->mode,
(i == DMA_TX_SIZE - 1));
- priv->dirty_tx = 0;
- priv->cur_tx = 0;
- netdev_reset_queue(priv->dev);
- priv->hw->dma->start_tx(priv->ioaddr);
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ stmmac_start_tx_dma(priv, chan);
priv->dev->stats.tx_errors++;
- netif_wake_queue(priv->dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
+}
+
+/**
+ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ * @priv: driver private structure
+ * @txmode: TX operating mode
+ * @rxmode: RX operating mode
+ * @chan: channel index
+ * Description: it is used for configuring the DMA operation mode at
+ * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
+ * mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+{
+ int rxfifosz = priv->plat->rx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+ rxfifosz);
+ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+ rxfifosz);
+ }
}
/**
@@ -1443,31 +1962,43 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
*/
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
int status;
- int rxfifosz = priv->plat->rx_fifo_size;
+ u32 chan;
- status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
- if (likely((status & handle_rx)) || (status & handle_tx)) {
- if (likely(napi_schedule_prep(&priv->napi))) {
- stmmac_disable_dma_irq(priv);
- __napi_schedule(&priv->napi);
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+
+ status = priv->hw->dma->dma_interrupt(priv->ioaddr,
+ &priv->xstats, chan);
+ if (likely((status & handle_rx)) || (status & handle_tx)) {
+ if (likely(napi_schedule_prep(&rx_q->napi))) {
+ stmmac_disable_dma_irq(priv, chan);
+ __napi_schedule(&rx_q->napi);
+ }
}
- }
- if (unlikely(status & tx_hard_error_bump_tc)) {
- /* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- (tc <= 256)) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
- rxfifosz);
- else
- priv->hw->dma->dma_mode(priv->ioaddr, tc,
- SF_DMA_MODE, rxfifosz);
- priv->xstats.threshold = tc;
+
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+ (tc <= 256)) {
+ tc += 64;
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv,
+ tc,
+ tc,
+ chan);
+ else
+ stmmac_set_dma_operation_mode(priv,
+ tc,
+ SF_DMA_MODE,
+ chan);
+ priv->xstats.threshold = tc;
+ }
+ } else if (unlikely(status == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
}
- } else if (unlikely(status == tx_hard_error))
- stmmac_tx_err(priv);
+ }
}
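The threshold back-off now runs per channel, but the rule is the old one: every tx_hard_error_bump_tc event raises the shared threshold tc by 64 as long as tc <= 256 (and store-and-forward is not already selected), so starting from the default threshold of 64 the sequence is 64 -> 128 -> 192 -> 256 -> 320 and then stops. As a standalone sketch of the bump rule:

	static unsigned int bump_tx_threshold(unsigned int cur)
	{
		/* grow by 64 while cur <= 256:
		 * 64 -> 128 -> 192 -> 256 -> 320, then hold
		 */
		if (cur <= 256)
			return cur + 64;
		return cur;
	}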
/**
@@ -1574,6 +2105,13 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 dummy_dma_rx_phy = 0;
+ u32 dummy_dma_tx_phy = 0;
+ u32 chan = 0;
int atds = 0;
int ret = 0;
@@ -1591,19 +2129,49 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
- priv->dma_tx_phy, priv->dma_rx_phy, atds);
-
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->rx_tail_addr = priv->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
- STMMAC_CHAN0);
+ /* DMA Configuration */
+ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+ dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ priv->hw->dma->init_rx_chan(priv->ioaddr,
+ priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ rx_q->rx_tail_addr,
+ chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ priv->hw->dma->init_chan(priv->ioaddr,
+ priv->plat->dma_cfg,
+ chan);
+
+ priv->hw->dma->init_tx_chan(priv->ioaddr,
+ priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
- priv->tx_tail_addr = priv->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
- STMMAC_CHAN0);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
+ tx_q->tx_tail_addr,
+ chan);
+ }
+ } else {
+ rx_q = &priv->rx_queue[chan];
+ tx_q = &priv->tx_queue[chan];
+ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
}
if (priv->plat->axi && priv->hw->dma->axi)
@@ -1621,8 +2189,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer(unsigned long data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
- stmmac_tx_clean(priv);
+ /* let's scan all the tx queues */
+ for (queue = 0; queue < tx_queues_count; queue++)
+ stmmac_tx_clean(priv, queue);
}
/**
@@ -1644,6 +2216,196 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
add_timer(&priv->txtimer);
}
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
+ /* set TX ring length */
+ if (priv->hw->dma->set_tx_ring_len) {
+ for (chan = 0; chan < tx_channels_count; chan++)
+ priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+ (DMA_TX_SIZE - 1), chan);
+ }
+
+ /* set RX ring length */
+ if (priv->hw->dma->set_rx_ring_len) {
+ for (chan = 0; chan < rx_channels_count; chan++)
+ priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+ (DMA_RX_SIZE - 1), chan);
+ }
+}
+
+/**
+ * stmmac_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting the TX queue weights
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 weight;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ weight = priv->plat->tx_queues_cfg[queue].weight;
+ priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+ }
+}
+
+/**
+ * stmmac_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 mode_to_use;
+ u32 queue;
+
+ /* queue 0 is reserved for legacy traffic */
+ for (queue = 1; queue < tx_queues_count; queue++) {
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB)
+ continue;
+
+ priv->hw->mac->config_cbs(priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ }
+}
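For background, the four CBS parameters follow the 802.1Qav credit-based shaper model: a queue reserving a fraction f of the port rate R gains credit at idle_slope = f * R while it waits and drains it at send_slope (magnitude R - idle_slope) while it transmits, with high_credit/low_credit clamping the accumulated credit. The driver programs platform-provided values verbatim; a purely illustrative helper deriving slopes from a bandwidth percentage might read as follows (field names match the tx_queues_cfg entries used above, but the clamp values and register encoding are hardware-specific placeholders):

	static void cbs_cfg_from_percent(struct stmmac_txq_cfg *cfg,
					 u32 port_rate_kbps, u32 percent)
	{
		cfg->idle_slope = port_rate_kbps * percent / 100;   /* f * R   */
		cfg->send_slope = port_rate_kbps - cfg->idle_slope; /* R - f*R */
		cfg->high_credit = 0x1000;	/* placeholder clamp values */
		cfg->low_credit = 0x2000;
	}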
+
+/**
+ * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 chan;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ chan = priv->plat->rx_queues_cfg[queue].chan;
+ priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ if (!priv->plat->rx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->rx_queues_cfg[queue].prio;
+ priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ if (!priv->plat->tx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->tx_queues_cfg[queue].prio;
+ priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u8 packet;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ /* no specific packet type routing specified for the queue */
+ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+ continue;
+
+ packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+ }
+}
+
+/**
+ * stmmac_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+ if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+ stmmac_set_tx_queue_weight(priv);
+
+ /* Configure MTL RX algorithms */
+ if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
+ priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
+ priv->plat->rx_sched_algorithm);
+
+ /* Configure MTL TX algorithms */
+ if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
+ priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
+ priv->plat->tx_sched_algorithm);
+
+ /* Configure CBS in AVB TX queues */
+ if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+ stmmac_configure_cbs(priv);
+
+ /* Map RX MTL to DMA channels */
+ if (priv->hw->mac->map_mtl_to_dma)
+ stmmac_rx_queue_dma_chan_map(priv);
+
+ /* Enable MAC RX Queues */
+ if (priv->hw->mac->rx_queue_enable)
+ stmmac_mac_enable_rx_queues(priv);
+
+ /* Set RX priorities */
+ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
+ stmmac_mac_config_rx_queues_prio(priv);
+
+ /* Set TX priorities */
+ if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
+ stmmac_mac_config_tx_queues_prio(priv);
+
+ /* Set RX routing */
+ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
+ stmmac_mac_config_rx_queues_routing(priv);
+}
+
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -1659,6 +2421,9 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 chan;
int ret;
/* DMA initialization and SW reset */
@@ -1688,9 +2453,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
- /* Initialize MAC RX Queues */
- if (priv->hw->mac->rx_queue_enable)
- stmmac_mac_enable_rx_queues(priv);
+ /* Initialize MTL */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_mtl_configuration(priv);
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
@@ -1700,10 +2465,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- stmmac_dwmac4_set_mac(priv->ioaddr, true);
- else
- stmmac_set_mac(priv->ioaddr, true);
+ priv->hw->mac->set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1711,6 +2473,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
if (init_ptp) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
+
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -1725,35 +2491,37 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
__func__);
#endif
/* Start the ball rolling... */
- netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
+ stmmac_start_all_dma(priv);
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
}
if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
- /* set TX ring length */
- if (priv->hw->dma->set_tx_ring_len)
- priv->hw->dma->set_tx_ring_len(priv->ioaddr,
- (DMA_TX_SIZE - 1));
- /* set RX ring length */
- if (priv->hw->dma->set_rx_ring_len)
- priv->hw->dma->set_rx_ring_len(priv->ioaddr,
- (DMA_RX_SIZE - 1));
+ /* set TX and RX rings length */
+ stmmac_set_rings_length(priv);
+
/* Enable TSO */
- if (priv->tso)
- priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+ if (priv->tso) {
+ for (chan = 0; chan < tx_cnt; chan++)
+ priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+ }
return 0;
}
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -1821,7 +2589,7 @@ static int stmmac_open(struct net_device *dev)
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
- goto init_error;
+ goto irq_error;
}
/* Request the Wake IRQ in case of another line is used for WoL */
@@ -1848,8 +2616,8 @@ static int stmmac_open(struct net_device *dev)
}
}
- napi_enable(&priv->napi);
- netif_start_queue(dev);
+ stmmac_enable_all_queues(priv);
+ stmmac_start_all_queues(priv);
return 0;
@@ -1858,7 +2626,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
+irq_error:
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+ del_timer_sync(&priv->txtimer);
+ stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
@@ -1887,9 +2660,9 @@ static int stmmac_release(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- netif_stop_queue(dev);
+ stmmac_stop_all_queues(priv);
- napi_disable(&priv->napi);
+ stmmac_disable_all_queues(priv);
del_timer_sync(&priv->txtimer);
@@ -1901,14 +2674,13 @@ static int stmmac_release(struct net_device *dev)
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- priv->hw->dma->stop_tx(priv->ioaddr);
- priv->hw->dma->stop_rx(priv->ioaddr);
+ stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, false);
+ priv->hw->mac->set_mac(priv->ioaddr, false);
netif_carrier_off(dev);
@@ -1927,22 +2699,24 @@ static int stmmac_release(struct net_device *dev)
* @des: buffer start address
* @total_len: total length to fill in descriptors
* @last_segment: condition for the last descriptor
+ * @queue: TX queue index
* Description:
* This function fills the descriptors and requests new descriptors according
* to the buffer length to fill.
*/
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
- int total_len, bool last_segment)
+ int total_len, bool last_segment, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
struct dma_desc *desc;
- int tmp_len;
u32 buff_size;
+ int tmp_len;
tmp_len = total_len;
while (tmp_len > 0) {
- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
- desc = priv->dma_tx + priv->cur_tx;
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ desc = tx_q->dma_tx + tx_q->cur_tx;
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
@@ -1986,23 +2760,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
*/
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 pay_len, mss;
- int tmp_pay_len = 0;
+ struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
+ u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, des;
- struct dma_desc *desc, *first, *mss_desc = NULL;
+ struct stmmac_tx_queue *tx_q;
+ int tmp_pay_len = 0;
+ u32 pay_len, mss;
u8 proto_hdr_len;
int i;
+ tx_q = &priv->tx_queue[queue];
+
/* Compute header lengths */
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
/* Desc availability based on threshold should be enough safe */
- if (unlikely(stmmac_tx_avail(priv) <
+ if (unlikely(stmmac_tx_avail(priv, queue) <
(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
@@ -2017,10 +2796,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != priv->mss) {
- mss_desc = priv->dma_tx + priv->cur_tx;
+ mss_desc = tx_q->dma_tx + tx_q->cur_tx;
priv->hw->desc->set_mss(mss_desc, mss);
priv->mss = mss;
- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
}
if (netif_msg_tx_queued(priv)) {
@@ -2030,9 +2809,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
- first_entry = priv->cur_tx;
+ first_entry = tx_q->cur_tx;
- desc = priv->dma_tx + first_entry;
+ desc = tx_q->dma_tx + first_entry;
first = desc;
/* first descriptor: fill Headers on Buf1 */
@@ -2041,9 +2820,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = des;
- priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- priv->tx_skbuff[first_entry] = skb;
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ tx_q->tx_skbuff[first_entry] = skb;
first->des0 = cpu_to_le32(des);
@@ -2054,7 +2833,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
@@ -2063,24 +2842,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
stmmac_tso_allocator(priv, des, skb_frag_size(frag),
- (i == nfrags - 1));
+ (i == nfrags - 1), queue);
- priv->tx_skbuff_dma[priv->cur_tx].buf = des;
- priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
- priv->tx_skbuff[priv->cur_tx] = NULL;
- priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
+ tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
}
- priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
- if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
__func__);
- netif_stop_queue(dev);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
dev->stats.tx_bytes += skb->len;
@@ -2112,7 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tso_tx_desc(first, 1,
proto_hdr_len,
pay_len,
- 1, priv->tx_skbuff_dma[first_entry].last_segment,
+ 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
@@ -2127,20 +2908,20 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
- __func__, priv->cur_tx, priv->dirty_tx, first_entry,
- priv->cur_tx, first, nfrags);
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ tx_q->cur_tx, first, nfrags);
- priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+ priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
0);
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
}
- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
- STMMAC_CHAN0);
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+ queue);
return NETDEV_TX_OK;
@@ -2164,21 +2945,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
+ u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int entry, first_entry;
struct dma_desc *desc, *first;
+ struct stmmac_tx_queue *tx_q;
unsigned int enh_desc;
unsigned int des;
+ tx_q = &priv->tx_queue[queue];
+
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
return stmmac_tso_xmit(skb, dev);
}
- if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
@@ -2190,19 +2976,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
- entry = priv->cur_tx;
+ entry = tx_q->cur_tx;
first_entry = entry;
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
if (likely(priv->extend_desc))
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
first = desc;
- priv->tx_skbuff[first_entry] = skb;
+ tx_q->tx_skbuff[first_entry] = skb;
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
@@ -2211,7 +2997,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
- entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
}
@@ -2224,48 +3010,49 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (likely(priv->extend_desc))
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
des = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
- priv->tx_skbuff[entry] = NULL;
+ tx_q->tx_skbuff[entry] = NULL;
- priv->tx_skbuff_dma[entry].buf = des;
+ tx_q->tx_skbuff_dma[entry].buf = des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
desc->des0 = cpu_to_le32(des);
else
desc->des2 = cpu_to_le32(des);
- priv->tx_skbuff_dma[entry].map_as_page = true;
- priv->tx_skbuff_dma[entry].len = len;
- priv->tx_skbuff_dma[entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[entry].map_as_page = true;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
/* Prepare the descriptor and set the own bit too */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
- priv->mode, 1, last_segment);
+ priv->mode, 1, last_segment,
+ skb->len);
}
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
- priv->cur_tx = entry;
+ tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
void *tx_head;
netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
- __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
- tx_head = (void *)priv->dma_etx;
+ tx_head = (void *)tx_q->dma_etx;
else
- tx_head = (void *)priv->dma_tx;
+ tx_head = (void *)tx_q->dma_tx;
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
@@ -2273,10 +3060,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
print_pkt(skb->data, skb->len);
}
- if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
__func__);
- netif_stop_queue(dev);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
dev->stats.tx_bytes += skb->len;
@@ -2311,14 +3098,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
first->des0 = cpu_to_le32(des);
else
first->des2 = cpu_to_le32(des);
- priv->tx_skbuff_dma[first_entry].len = nopaged_len;
- priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
@@ -2330,7 +3117,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */
priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
csum_insertion, priv->mode, 1,
- last_segment);
+ last_segment, skb->len);
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
@@ -2339,13 +3126,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_wmb();
}
- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
if (priv->synopsys_id < DWMAC_CORE_4_00)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
else
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
- STMMAC_CHAN0);
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+ queue);
return NETDEV_TX_OK;
@@ -2373,9 +3160,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
-static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
{
- if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+ if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
return 0;
return 1;
@@ -2384,30 +3171,33 @@ static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
+ * @queue: RX queue index
* Description : this is to reallocate the skb for the reception process
* that is based on zero-copy.
*/
-static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int entry = rx_q->dirty_rx;
+
int bfsize = priv->dma_buf_sz;
- unsigned int entry = priv->dirty_rx;
- int dirty = stmmac_rx_dirty(priv);
while (dirty-- > 0) {
struct dma_desc *p;
if (priv->extend_desc)
- p = (struct dma_desc *)(priv->dma_erx + entry);
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
- p = priv->dma_rx + entry;
+ p = rx_q->dma_rx + entry;
- if (likely(priv->rx_skbuff[entry] == NULL)) {
+ if (likely(!rx_q->rx_skbuff[entry])) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
if (unlikely(!skb)) {
/* so for a while no zero-copy! */
- priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+ rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
if (unlikely(net_ratelimit()))
dev_err(priv->device,
"fail to alloc skb entry %d\n",
@@ -2415,28 +3205,28 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
break;
}
- priv->rx_skbuff[entry] = skb;
- priv->rx_skbuff_dma[entry] =
+ rx_q->rx_skbuff[entry] = skb;
+ rx_q->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device,
- priv->rx_skbuff_dma[entry])) {
+ rx_q->rx_skbuff_dma[entry])) {
netdev_err(priv->dev, "Rx DMA map failed\n");
dev_kfree_skb(skb);
break;
}
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+ p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
p->des1 = 0;
} else {
- p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+ p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
}
if (priv->hw->mode->refill_desc3)
- priv->hw->mode->refill_desc3(priv, p);
+ priv->hw->mode->refill_desc3(rx_q, p);
- if (priv->rx_zeroc_thresh > 0)
- priv->rx_zeroc_thresh--;
+ if (rx_q->rx_zeroc_thresh > 0)
+ rx_q->rx_zeroc_thresh--;
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
@@ -2452,31 +3242,33 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
- priv->dirty_rx = entry;
+ rx_q->dirty_rx = entry;
}
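
stmmac_rx_refill() above is a standard two-index ring walk: cur_rx marks where the DMA engine is producing, dirty_rx marks the next consumed slot still awaiting a fresh buffer. A self-contained sketch of the bookkeeping, assuming a power-of-two RING_SIZE and a toy_alloc() placeholder:

    #define RING_SIZE 512                   /* power of two assumed here */

    struct toy_rx_ring {
            unsigned int cur;               /* next slot hardware will fill */
            unsigned int dirty;             /* next slot software must re-arm */
            void *buf[RING_SIZE];
    };

    static void toy_refill(struct toy_rx_ring *r)
    {
            int todo = (r->cur - r->dirty) & (RING_SIZE - 1);

            while (todo-- > 0) {
                    unsigned int e = r->dirty;

                    if (!r->buf[e]) {
                            r->buf[e] = toy_alloc();    /* placeholder */
                            if (!r->buf[e])
                                    break;  /* out of memory: retry next poll */
                    }
                    /* ... write buf[e]'s DMA address and hand the
                     * descriptor back to the hardware here ... */
                    r->dirty = (r->dirty + 1) & (RING_SIZE - 1);
            }
    }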
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
- * @limit: napi bugget.
+ * @limit: napi budget
+ * @queue: RX queue index.
* Description : this is the function called by the napi poll method.
* It gets all the frames inside the ring.
*/
-static int stmmac_rx(struct stmmac_priv *priv, int limit)
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- unsigned int entry = priv->cur_rx;
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int entry = rx_q->cur_rx;
+ int coe = priv->hw->rx_csum;
unsigned int next_entry;
unsigned int count = 0;
- int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
void *rx_head;
netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- rx_head = (void *)priv->dma_erx;
+ rx_head = (void *)rx_q->dma_erx;
else
- rx_head = (void *)priv->dma_rx;
+ rx_head = (void *)rx_q->dma_rx;
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
@@ -2486,9 +3278,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
struct dma_desc *np;
if (priv->extend_desc)
- p = (struct dma_desc *)(priv->dma_erx + entry);
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
- p = priv->dma_rx + entry;
+ p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
status = priv->hw->desc->rx_status(&priv->dev->stats,
@@ -2499,20 +3291,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
count++;
- priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
- next_entry = priv->cur_rx;
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+ next_entry = rx_q->cur_rx;
if (priv->extend_desc)
- np = (struct dma_desc *)(priv->dma_erx + next_entry);
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
else
- np = priv->dma_rx + next_entry;
+ np = rx_q->dma_rx + next_entry;
prefetch(np);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
&priv->xstats,
- priv->dma_erx +
+ rx_q->dma_erx +
entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
@@ -2522,9 +3314,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
- priv->rx_skbuff[entry] = NULL;
+ rx_q->rx_skbuff[entry] = NULL;
dma_unmap_single(priv->device,
- priv->rx_skbuff_dma[entry],
+ rx_q->rx_skbuff_dma[entry],
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
@@ -2572,7 +3364,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
*/
if (unlikely(!priv->plat->has_gmac4 &&
((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(priv)))) {
+ stmmac_rx_threshold_count(rx_q)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
@@ -2584,21 +3376,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
dma_sync_single_for_cpu(priv->device,
- priv->rx_skbuff_dma
+ rx_q->rx_skbuff_dma
[entry], frame_len,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
- priv->
+ rx_q->
rx_skbuff[entry]->data,
frame_len);
skb_put(skb, frame_len);
dma_sync_single_for_device(priv->device,
- priv->rx_skbuff_dma
+ rx_q->rx_skbuff_dma
[entry], frame_len,
DMA_FROM_DEVICE);
} else {
- skb = priv->rx_skbuff[entry];
+ skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
@@ -2607,12 +3399,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
break;
}
prefetch(skb->data - NET_IP_ALIGN);
- priv->rx_skbuff[entry] = NULL;
- priv->rx_zeroc_thresh++;
+ rx_q->rx_skbuff[entry] = NULL;
+ rx_q->rx_zeroc_thresh++;
skb_put(skb, frame_len);
dma_unmap_single(priv->device,
- priv->rx_skbuff_dma[entry],
+ rx_q->rx_skbuff_dma[entry],
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
@@ -2634,7 +3426,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&priv->napi, skb);
+ napi_gro_receive(&rx_q->napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -2642,7 +3434,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
entry = next_entry;
}
- stmmac_rx_refill(priv);
+ stmmac_rx_refill(priv, queue);
priv->xstats.rx_pkt_n += count;
@@ -2659,16 +3451,24 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
- struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+ struct stmmac_rx_queue *rx_q =
+ container_of(napi, struct stmmac_rx_queue, napi);
+ struct stmmac_priv *priv = rx_q->priv_data;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 chan = rx_q->queue_index;
int work_done = 0;
+ u32 queue;
priv->xstats.napi_poll++;
- stmmac_tx_clean(priv);
- work_done = stmmac_rx(priv, budget);
+ /* check all the queues */
+ for (queue = 0; queue < tx_count; queue++)
+ stmmac_tx_clean(priv, queue);
+
+ work_done = stmmac_rx(priv, budget, rx_q->queue_index);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv);
+ stmmac_enable_dma_irq(priv, chan);
}
return work_done;
}
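
The poll routine now recovers its per-queue context from the napi_struct itself: each stmmac_rx_queue embeds a napi_struct, so container_of() inverts the pointer. The idiom in isolation, with toy types and a toy_process_rx() placeholder:

    #include <linux/netdevice.h>

    struct toy_rx_queue {
            struct napi_struct napi;        /* embedded, one per RX queue */
            u32 queue_index;
            struct toy_priv *priv;
    };

    static int toy_poll(struct napi_struct *napi, int budget)
    {
            /* recover the enclosing queue from the embedded member */
            struct toy_rx_queue *rxq =
                    container_of(napi, struct toy_rx_queue, napi);
            int done = toy_process_rx(rxq, budget); /* placeholder */

            if (done < budget)
                    napi_complete_done(napi, done); /* re-enable this queue's IRQ */
            return done;
    }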
@@ -2684,9 +3484,12 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
static void stmmac_tx_timeout(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 chan;
/* Clear Tx resources and restart transmitting again */
- stmmac_tx_err(priv);
+ for (chan = 0; chan < tx_count; chan++)
+ stmmac_tx_err(priv, chan);
}
/**
@@ -2795,6 +3598,12 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queues_count;
+ u32 queue;
+
+ queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
@@ -2808,16 +3617,30 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
+
if (unlikely(status)) {
/* For LPI we need to save the tx status */
if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
- priv->rx_tail_addr,
- STMMAC_CHAN0);
+ }
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ for (queue = 0; queue < queues_count; queue++) {
+ struct stmmac_rx_queue *rx_q =
+ &priv->rx_queue[queue];
+
+ status |=
+ priv->hw->mac->host_mtl_irq_status(priv->hw,
+ queue);
+
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
+ priv->hw->dma->set_rx_tail_ptr)
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ rx_q->rx_tail_addr,
+ queue);
+ }
}
/* PCS link status */
@@ -2915,17 +3738,40 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
- if (priv->extend_desc) {
- seq_printf(seq, "Extended RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
- seq_printf(seq, "Extended TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
- } else {
- seq_printf(seq, "RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
- seq_printf(seq, "TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ seq_printf(seq, "RX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_erx,
+ DMA_RX_SIZE, 1, seq);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_rx,
+ DMA_RX_SIZE, 0, seq);
+ }
+ }
+
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ seq_printf(seq, "TX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_etx,
+ DMA_TX_SIZE, 1, seq);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_tx,
+ DMA_TX_SIZE, 0, seq);
+ }
}
return 0;
@@ -3208,11 +4054,14 @@ int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
- int ret = 0;
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
+ int ret = 0;
+ u32 queue;
- ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES,
+ MTL_MAX_RX_QUEUES);
if (!ndev)
return -ENOMEM;
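
alloc_etherdev_mqs() sizes the netdev for the compile-time maximum queue counts; the next hunk then trims the counts visible to the stack with netif_set_real_num_*_queues(). In outline (TOY_MAX_* and used_* are illustrative):

    #include <linux/etherdevice.h>

    #define TOY_MAX_TXQ 8
    #define TOY_MAX_RXQ 8

    static int toy_probe(struct device *dev, u32 used_txq, u32 used_rxq)
    {
            struct net_device *ndev;

            ndev = alloc_etherdev_mqs(sizeof(struct toy_priv),
                                      TOY_MAX_TXQ, TOY_MAX_RXQ);
            if (!ndev)
                    return -ENOMEM;

            /* trim the visible queue counts to what this platform uses */
            netif_set_real_num_rx_queues(ndev, used_rxq);
            netif_set_real_num_tx_queues(ndev, used_txq);

            /* ... set netdev_ops, register_netdev(), etc. ... */
            return 0;
    }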
@@ -3254,6 +4103,10 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -3303,7 +4156,12 @@ int stmmac_dvr_probe(struct device *device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
- netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+ for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
+ (8 * priv->plat->rx_queues_to_use));
+ }
spin_lock_init(&priv->lock);
@@ -3348,7 +4206,11 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- netif_napi_del(&priv->napi);
+ for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ netif_napi_del(&rx_q->napi);
+ }
error_hw_init:
free_netdev(ndev);
@@ -3369,10 +4231,9 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
- priv->hw->dma->stop_rx(priv->ioaddr);
- priv->hw->dma->stop_tx(priv->ioaddr);
+ stmmac_stop_all_dma(priv);
- stmmac_set_mac(priv->ioaddr, false);
+ priv->hw->mac->set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
@@ -3411,20 +4272,19 @@ int stmmac_suspend(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
netif_device_detach(ndev);
- netif_stop_queue(ndev);
+ stmmac_stop_all_queues(priv);
- napi_disable(&priv->napi);
+ stmmac_disable_all_queues(priv);
/* Stop TX/RX DMA */
- priv->hw->dma->stop_tx(priv->ioaddr);
- priv->hw->dma->stop_rx(priv->ioaddr);
+ stmmac_stop_all_dma(priv);
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
- stmmac_set_mac(priv->ioaddr, false);
+ priv->hw->mac->set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
@@ -3440,6 +4300,31 @@ int stmmac_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @dev: device pointer
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+ }
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ }
+}
+
+/**
* stmmac_resume - resume callback
* @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
@@ -3479,10 +4364,8 @@ int stmmac_resume(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
- priv->cur_rx = 0;
- priv->dirty_rx = 0;
- priv->dirty_tx = 0;
- priv->cur_tx = 0;
+ stmmac_reset_queues_param(priv);
+
/* reset private mss value to force mss context settings at
* next tso xmit (only used for gmac4).
*/
@@ -3494,9 +4377,9 @@ int stmmac_resume(struct device *dev)
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
- napi_enable(&priv->napi);
+ stmmac_enable_all_queues(priv);
- netif_start_queue(ndev);
+ stmmac_start_all_queues(priv);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 5c9e462276b9..39be96779145 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -32,6 +32,7 @@
*/
struct stmmac_pci_dmi_data {
const char *name;
+ const char *asset_tag;
unsigned int func;
int phy_addr;
};
@@ -46,6 +47,7 @@ struct stmmac_pci_info {
static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
{
const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+ const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
unsigned int func = PCI_FUNC(info->pdev->devfn);
struct stmmac_pci_dmi_data *dmi;
@@ -57,8 +59,12 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
return 1;
for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
- if (!strcmp(dmi->name, name) && dmi->func == func)
+ if (!strcmp(dmi->name, name) && dmi->func == func) {
+ /* If asset tag is provided, match on it as well. */
+ if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
+ continue;
return dmi->phy_addr;
+ }
}
return -ENODEV;
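
The lookup now matches on DMI board name, PCI function, and, when the table supplies one, the DMI asset tag, which lets several SIMATIC IOT2000 variants share a board name yet map to different PHY addresses. A standalone sketch of the same walk (with an added NULL guard for firmware that reports no asset tag, which the hunk above assumes is present):

    #include <linux/dmi.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    static int toy_find_phy_addr(const struct stmmac_pci_dmi_data *tbl,
                                 unsigned int func)
    {
            const char *name = dmi_get_system_info(DMI_BOARD_NAME);
            const char *tag  = dmi_get_system_info(DMI_BOARD_ASSET_TAG);

            if (!name)
                    return -ENODEV;

            for (; tbl->name && *tbl->name; tbl++) {
                    if (strcmp(tbl->name, name) || tbl->func != func)
                            continue;
                    /* asset tag is an optional extra discriminator */
                    if (tbl->asset_tag && (!tag || strcmp(tbl->asset_tag, tag)))
                            continue;
                    return tbl->phy_addr;
            }
            return -ENODEV;
    }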
@@ -88,6 +94,17 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -142,6 +159,24 @@ static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
.func = 6,
.phy_addr = 1,
},
+ {
+ .name = "SIMATIC IOT2000",
+ .asset_tag = "6ES7647-0AA00-0YA2",
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .name = "SIMATIC IOT2000",
+ .asset_tag = "6ES7647-0AA00-1YA2",
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .name = "SIMATIC IOT2000",
+ .asset_tag = "6ES7647-0AA00-1YA2",
+ .func = 7,
+ .phy_addr = 1,
+ },
{}
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 433a84239a68..7fc3a1ef395a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
if (!np)
return NULL;
- axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
if (!axi) {
of_node_put(np);
return ERR_PTR(-ENOMEM);
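
Switching the AXI blob to devm_kzalloc() ties its lifetime to the device, so later error and removal paths need no explicit kfree(). The idiom, reduced to a sketch:

    static struct stmmac_axi *toy_axi_setup(struct platform_device *pdev)
    {
            struct stmmac_axi *axi;

            axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
            if (!axi)
                    return ERR_PTR(-ENOMEM);

            /* no kfree() on later failure paths: the allocation is
             * released automatically when the device is unbound */
            return axi;
    }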
@@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
}
/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ */
+static void stmmac_mtl_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *q_node;
+ struct device_node *rx_node;
+ struct device_node *tx_node;
+ u8 queue = 0;
+
+ /* For backwards-compatibility with device trees that don't have any
+ * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
+ * to a single RX queue and a single TX queue.
+ */
+ plat->rx_queues_to_use = 1;
+ plat->tx_queues_to_use = 1;
+
+ rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+ if (!rx_node)
+ return;
+
+ tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+ if (!tx_node) {
+ of_node_put(rx_node);
+ return;
+ }
+
+ /* Processing RX queues common config */
+ if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use))
+ plat->rx_queues_to_use = 1;
+
+ if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+ else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+ else
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ /* Processing individual RX queue config */
+ for_each_child_of_node(rx_node, q_node) {
+ if (queue >= plat->rx_queues_to_use)
+ break;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ else
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+ if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan))
+ plat->rx_queues_cfg[queue].chan = queue;
+ /* TODO: Dynamic mapping to be included in the future */
+
+ if (of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio)) {
+ plat->rx_queues_cfg[queue].prio = 0;
+ plat->rx_queues_cfg[queue].use_prio = false;
+ } else {
+ plat->rx_queues_cfg[queue].use_prio = true;
+ }
+
+ /* RX queue specific packet type routing */
+ if (of_property_read_bool(q_node, "snps,route-avcp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
+ else if (of_property_read_bool(q_node, "snps,route-ptp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
+ else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
+ else if (of_property_read_bool(q_node, "snps,route-up"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
+ else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
+ else
+ plat->rx_queues_cfg[queue].pkt_route = 0x0;
+
+ queue++;
+ }
+
+ /* Processing TX queues common config */
+ if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use))
+ plat->tx_queues_to_use = 1;
+
+ if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+ else
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+ queue = 0;
+
+ /* Processing individual TX queue config */
+ for_each_child_of_node(tx_node, q_node) {
+ if (queue >= plat->tx_queues_to_use)
+ break;
+
+ if (of_property_read_u8(q_node, "snps,weight",
+ &plat->tx_queues_cfg[queue].weight))
+ plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ } else if (of_property_read_bool(q_node,
+ "snps,avb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+ /* Credit-Based Shaper parameters used by AVB */
+ if (of_property_read_u32(q_node, "snps,send_slope",
+ &plat->tx_queues_cfg[queue].send_slope))
+ plat->tx_queues_cfg[queue].send_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,idle_slope",
+ &plat->tx_queues_cfg[queue].idle_slope))
+ plat->tx_queues_cfg[queue].idle_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,high_credit",
+ &plat->tx_queues_cfg[queue].high_credit))
+ plat->tx_queues_cfg[queue].high_credit = 0x0;
+ if (of_property_read_u32(q_node, "snps,low_credit",
+ &plat->tx_queues_cfg[queue].low_credit))
+ plat->tx_queues_cfg[queue].low_credit = 0x0;
+ } else {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
+
+ if (of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio)) {
+ plat->tx_queues_cfg[queue].prio = 0;
+ plat->tx_queues_cfg[queue].use_prio = false;
+ } else {
+ plat->tx_queues_cfg[queue].use_prio = true;
+ }
+
+ queue++;
+ }
+
+ of_node_put(rx_node);
+ of_node_put(tx_node);
+ of_node_put(q_node);
+}
+
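
Every property read in stmmac_mtl_setup() follows the same read-with-fallback idiom: the of_property_read_*() helpers return non-zero when a property is absent, so the default lands in a single branch. Reduced to its core (the binding string is the real one; the wrapper function is a sketch):

    #include <linux/of.h>

    static void toy_parse_rx_count(struct device_node *rx_node,
                                   struct plat_stmmacenet_data *plat)
    {
            /* absent property => helper returns non-zero => keep default */
            if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
                                    &plat->rx_queues_to_use))
                    plat->rx_queues_to_use = 1;
    }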
+/**
* stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
* @plat: driver data platform structure
* @np: device tree node
@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->axi = stmmac_axi_setup(pdev);
+ stmmac_mtl_setup(pdev, plat);
+
/* clock setup */
plat->stmmac_clk = devm_clk_get(&pdev->dev,
STMMAC_RESOURCE_NAME);
@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
clk_prepare_enable(plat->pclk);
/* Fall-back to main clock in case of no PTP ref is passed */
- plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
if (IS_ERR(plat->clk_ptp_ref)) {
plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
plat->clk_ptp_ref = NULL;
dev_warn(&pdev->dev, "PTP uses main clock\n");
} else {
- clk_prepare_enable(plat->clk_ptp_ref);
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0e8e89f17dbb..382993c1561c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -691,7 +691,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
}
/* Must be invoked under cp->lock */
-static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+static void cas_begin_auto_negotiation(struct cas *cp,
+ const struct ethtool_link_ksettings *ep)
{
u16 ctl;
#if 1
@@ -704,16 +705,16 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
if (!ep)
goto start_aneg;
lcntl = cp->link_cntl;
- if (ep->autoneg == AUTONEG_ENABLE)
+ if (ep->base.autoneg == AUTONEG_ENABLE) {
cp->link_cntl = BMCR_ANENABLE;
- else {
- u32 speed = ethtool_cmd_speed(ep);
+ } else {
+ u32 speed = ep->base.speed;
cp->link_cntl = 0;
if (speed == SPEED_100)
cp->link_cntl |= BMCR_SPEED100;
else if (speed == SPEED_1000)
cp->link_cntl |= CAS_BMCR_SPEED1000;
- if (ep->duplex == DUPLEX_FULL)
+ if (ep->base.duplex == DUPLEX_FULL)
cp->link_cntl |= BMCR_FULLDPLX;
}
#if 1
@@ -4528,19 +4529,21 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
-static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct cas *cp = netdev_priv(dev);
u16 bmcr;
int full_duplex, speed, pause;
unsigned long flags;
enum link_state linkstate = link_up;
+ u32 supported, advertising;
- cmd->advertising = 0;
- cmd->supported = SUPPORTED_Autoneg;
+ advertising = 0;
+ supported = SUPPORTED_Autoneg;
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
- cmd->supported |= SUPPORTED_1000baseT_Full;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
/* Record PHY settings if HW is on. */
@@ -4548,17 +4551,15 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bmcr = 0;
linkstate = cp->lstate;
if (CAS_PHY_MII(cp->phy_type)) {
- cmd->port = PORT_MII;
- cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
- cmd->phy_address = cp->phy_addr;
- cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = cp->phy_addr;
+ advertising |= ADVERTISED_TP | ADVERTISED_MII |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->supported |=
+ supported |=
(SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -4574,11 +4575,10 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
} else {
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = 0;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
if (cp->hw_running) {
/* pcs uses the same bits as mii */
@@ -4590,21 +4590,20 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_unlock_irqrestore(&cp->lock, flags);
if (bmcr & BMCR_ANENABLE) {
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->autoneg = AUTONEG_ENABLE;
- ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.speed = ((speed == 10) ?
SPEED_10 :
((speed == 1000) ?
- SPEED_1000 : SPEED_100)));
- cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ SPEED_1000 : SPEED_100));
+ cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
SPEED_1000 :
((bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10)));
- cmd->duplex =
- (bmcr & BMCR_FULLDPLX) ?
+ SPEED_100 : SPEED_10));
+ cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
if (linkstate != link_up) {
@@ -4619,39 +4618,46 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* settings that we configured.
*/
if (cp->link_cntl & BMCR_ANENABLE) {
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
if (cp->link_cntl & BMCR_SPEED100) {
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
}
- cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
}
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct cas *cp = netdev_priv(dev);
unsigned long flags;
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
@@ -4753,8 +4759,6 @@ static void cas_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops cas_ethtool_ops = {
.get_drvinfo = cas_get_drvinfo,
- .get_settings = cas_get_settings,
- .set_settings = cas_set_settings,
.nway_reset = cas_nway_reset,
.get_link = cas_get_link,
.get_msglevel = cas_get_msglevel,
@@ -4764,6 +4768,8 @@ static const struct ethtool_ops cas_ethtool_ops = {
.get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
+ .get_link_ksettings = cas_get_link_ksettings,
+ .set_link_ksettings = cas_set_link_ksettings,
};
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
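
All four Sun drivers converted in this series follow the same migration recipe: keep building the legacy SUPPORTED_* / ADVERTISED_* u32 masks as before, populate cmd->base.* directly, and convert the masks into the ksettings link-mode bitmaps once at the end. The recipe in isolation (link state values are illustrative):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int toy_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
    {
            u32 supported = SUPPORTED_Autoneg | SUPPORTED_100baseT_Full;
            u32 advertising = ADVERTISED_Autoneg | ADVERTISED_100baseT_Full;

            cmd->base.speed = SPEED_100;
            cmd->base.duplex = DUPLEX_FULL;
            cmd->base.autoneg = AUTONEG_ENABLE;
            cmd->base.port = PORT_TP;

            /* legacy u32 masks -> ksettings bitmaps, done once at the end */
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                    supported);
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                    advertising);
            return 0;
    }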
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 89952deae47f..5a90fed06260 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -1,6 +1,6 @@
/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
*
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,8 +41,8 @@
static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
#define DRV_MODULE_NAME "ldmvsw"
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "February 3, 2017"
+#define DRV_MODULE_VERSION "1.2"
+#define DRV_MODULE_RELDATE "March 4, 2017"
static char version[] =
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
@@ -123,6 +123,20 @@ static void vsw_set_rx_mode(struct net_device *dev)
return sunvnet_set_rx_mode_common(dev, port->vp);
}
+int ldmvsw_open(struct net_device *dev)
+{
+ struct vnet_port *port = netdev_priv(dev);
+ struct vio_driver_state *vio = &port->vio;
+
+ /* reset the channel */
+ vio_link_state_change(vio, LDC_EVENT_RESET);
+ vnet_port_reset(port);
+ vio_port_up(vio);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ldmvsw_open);
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vsw_poll_controller(struct net_device *dev)
{
@@ -133,7 +147,7 @@ static void vsw_poll_controller(struct net_device *dev)
#endif
static const struct net_device_ops vsw_ops = {
- .ndo_open = sunvnet_open_common,
+ .ndo_open = ldmvsw_open,
.ndo_stop = sunvnet_close_common,
.ndo_set_rx_mode = vsw_set_rx_mode,
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
@@ -365,6 +379,11 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
napi_enable(&port->napi);
vio_port_up(&port->vio);
+ /* ensure no carrier until we receive an LDC_EVENT_UP,
+ * even if the vsw config script tries to force us up
+ */
+ netif_carrier_off(dev);
+
netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr);
pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 57978056b336..2dcca249eb9c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6813,7 +6813,8 @@ static void niu_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct niu *np = netdev_priv(dev);
struct niu_link_config *lp;
@@ -6821,28 +6822,30 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lp = &np->link_config;
memset(cmd, 0, sizeof(*cmd));
- cmd->phy_address = np->phy_addr;
- cmd->supported = lp->supported;
- cmd->advertising = lp->active_advertising;
- cmd->autoneg = lp->active_autoneg;
- ethtool_cmd_speed_set(cmd, lp->active_speed);
- cmd->duplex = lp->active_duplex;
- cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
- cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
+ cmd->base.phy_address = np->phy_addr;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ lp->supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ lp->active_advertising);
+ cmd->base.autoneg = lp->active_autoneg;
+ cmd->base.speed = lp->active_speed;
+ cmd->base.duplex = lp->active_duplex;
+ cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
return 0;
}
-static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct niu *np = netdev_priv(dev);
struct niu_link_config *lp = &np->link_config;
- lp->advertising = cmd->advertising;
- lp->speed = ethtool_cmd_speed(cmd);
- lp->duplex = cmd->duplex;
- lp->autoneg = cmd->autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
+ cmd->link_modes.advertising);
+ lp->speed = cmd->base.speed;
+ lp->duplex = cmd->base.duplex;
+ lp->autoneg = cmd->base.autoneg;
return niu_init_link(np);
}
@@ -7902,14 +7905,14 @@ static const struct ethtool_ops niu_ethtool_ops = {
.nway_reset = niu_nway_reset,
.get_eeprom_len = niu_get_eeprom_len,
.get_eeprom = niu_get_eeprom,
- .get_settings = niu_get_settings,
- .set_settings = niu_set_settings,
.get_strings = niu_get_strings,
.get_sset_count = niu_get_sset_count,
.get_ethtool_stats = niu_get_ethtool_stats,
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_link_ksettings = niu_get_link_ksettings,
+ .set_link_ksettings = niu_set_link_ksettings,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c4caf486cbef..3189722110c2 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -169,7 +169,7 @@ static void bigmac_stop(struct bigmac *bp)
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
- struct net_device_stats *stats = &bp->enet_stats;
+ struct net_device_stats *stats = &bp->dev->stats;
stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
sbus_writel(0, bregs + BMAC_RCRCECTR);
@@ -774,8 +774,8 @@ static void bigmac_tx(struct bigmac *bp)
if (this->tx_flags & TXD_OWN)
break;
skb = bp->tx_skbs[elem];
- bp->enet_stats.tx_packets++;
- bp->enet_stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dma_unmap_single(&bp->bigmac_op->dev,
this->tx_addr, skb->len,
DMA_TO_DEVICE);
@@ -811,12 +811,12 @@ static void bigmac_rx(struct bigmac *bp)
/* Check for errors. */
if (len < ETH_ZLEN) {
- bp->enet_stats.rx_errors++;
- bp->enet_stats.rx_length_errors++;
+ bp->dev->stats.rx_errors++;
+ bp->dev->stats.rx_length_errors++;
drop_it:
/* Return it to the BigMAC. */
- bp->enet_stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
this->rx_flags =
(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
goto next;
@@ -875,8 +875,8 @@ static void bigmac_rx(struct bigmac *bp)
/* No checksums done by the BigMAC ;-( */
skb->protocol = eth_type_trans(skb, bp->dev);
netif_rx(skb);
- bp->enet_stats.rx_packets++;
- bp->enet_stats.rx_bytes += len;
+ bp->dev->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += len;
next:
elem = NEXT_RX(elem);
this = &rxbase[elem];
@@ -987,7 +987,7 @@ static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
struct bigmac *bp = netdev_priv(dev);
bigmac_get_counters(bp, bp->bregs);
- return &bp->enet_stats;
+ return &dev->stats;
}
static void bigmac_set_multicast(struct net_device *dev)
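
sunbmac here (and sunhme below) drop their private struct net_device_stats copies in favor of the one already embedded in struct net_device, so the ndo_get_stats hook shrinks to a hardware-counter refresh plus a return of &dev->stats. In outline (toy_sync_hw_counters() is a placeholder):

    static struct net_device_stats *toy_get_stats(struct net_device *dev)
    {
            struct toy_priv *p = netdev_priv(dev);

            toy_sync_hw_counters(p);  /* fold hw counters into dev->stats */
            return &dev->stats;       /* no private stats struct needed */
    }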
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 532fc56830cf..ee56930475a8 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -311,7 +311,6 @@ struct bigmac {
enum bigmac_timer_state timer_state;
unsigned int timer_ticks;
- struct net_device_stats enet_stats;
struct platform_device *qec_op;
struct platform_device *bigmac_op;
struct net_device *dev;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 5c5952e782cd..fa607d062cb3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1250,12 +1250,18 @@ static void gem_stop_dma(struct gem *gp)
// XXX dbl check what that function should do when called on PCS PHY
-static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+static void gem_begin_auto_negotiation(struct gem *gp,
+ const struct ethtool_link_ksettings *ep)
{
u32 advertise, features;
int autoneg;
int speed;
int duplex;
+ u32 advertising;
+
+ if (ep)
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, ep->link_modes.advertising);
if (gp->phy_type != phy_mii_mdio0 &&
gp->phy_type != phy_mii_mdio1)
@@ -1278,13 +1284,13 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
/* Setup link parameters */
if (!ep)
goto start_aneg;
- if (ep->autoneg == AUTONEG_ENABLE) {
- advertise = ep->advertising;
+ if (ep->base.autoneg == AUTONEG_ENABLE) {
+ advertise = advertising;
autoneg = 1;
} else {
autoneg = 0;
- speed = ethtool_cmd_speed(ep);
- duplex = ep->duplex;
+ speed = ep->base.speed;
+ duplex = ep->base.duplex;
}
start_aneg:
@@ -2515,85 +2521,96 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
-static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct gem *gp = netdev_priv(dev);
+ u32 supported, advertising;
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
if (gp->phy_mii.def)
- cmd->supported = gp->phy_mii.def->features;
+ supported = gp->phy_mii.def->features;
else
- cmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full);
/* XXX hardcoded stuff for now */
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = 0; /* XXX fixed PHYAD */
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = 0; /* XXX fixed PHYAD */
/* Return current PHY settings */
- cmd->autoneg = gp->want_autoneg;
- ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
- cmd->duplex = gp->phy_mii.duplex;
- cmd->advertising = gp->phy_mii.advertising;
+ cmd->base.autoneg = gp->want_autoneg;
+ cmd->base.speed = gp->phy_mii.speed;
+ cmd->base.duplex = gp->phy_mii.duplex;
+ advertising = gp->phy_mii.advertising;
/* If we started with a forced mode, we don't have a default
* advertise set, we need to return something sensible so
* userland can re-enable autoneg properly.
*/
- if (cmd->advertising == 0)
- cmd->advertising = cmd->supported;
+ if (advertising == 0)
+ advertising = supported;
} else { // XXX PCS ?
- cmd->supported =
+ supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg);
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = cmd->port = cmd->phy_address =
- cmd->transceiver = cmd->autoneg = 0;
+ advertising = supported;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0;
+ cmd->base.port = 0;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = 0;
/* serdes means usually a Fibre connector, with most fixed */
if (gp->phy_type == phy_serdes) {
- cmd->port = PORT_FIBRE;
- cmd->supported = (SUPPORTED_1000baseT_Half |
+ cmd->base.port = PORT_FIBRE;
+ supported = (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE | SUPPORTED_Autoneg |
SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- cmd->advertising = cmd->supported;
- cmd->transceiver = XCVR_INTERNAL;
+ advertising = supported;
if (gp->lstate == link_up)
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = 1;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = 1;
}
}
- cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct gem *gp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE &&
- cmd->advertising == 0)
+ if (cmd->base.autoneg == AUTONEG_ENABLE &&
+ advertising == 0)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
@@ -2666,13 +2683,13 @@ static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops gem_ethtool_ops = {
.get_drvinfo = gem_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_settings = gem_get_settings,
- .set_settings = gem_set_settings,
.nway_reset = gem_nway_reset,
.get_msglevel = gem_get_msglevel,
.set_msglevel = gem_set_msglevel,
.get_wol = gem_get_wol,
.set_wol = gem_set_wol,
+ .get_link_ksettings = gem_get_link_ksettings,
+ .set_link_ksettings = gem_set_link_ksettings,
};
static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72ff05cd3ed8..9e983e1d8249 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -933,7 +933,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
/* hp->happy_lock must be held */
static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
{
- struct net_device_stats *stats = &hp->net_stats;
+ struct net_device_stats *stats = &hp->dev->stats;
stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
@@ -1294,9 +1294,10 @@ static void happy_meal_init_rings(struct happy_meal *hp)
}
/* hp->happy_lock must be held */
-static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
- void __iomem *tregs,
- struct ethtool_cmd *ep)
+static void
+happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+ void __iomem *tregs,
+ const struct ethtool_link_ksettings *ep)
{
int timeout;
@@ -1309,7 +1310,7 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
- if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
/* Advertise everything we can support. */
if (hp->sw_bmsr & BMSR_10HALF)
hp->sw_advertise |= (ADVERTISE_10HALF);
@@ -1384,14 +1385,14 @@ force_link:
/* Disable auto-negotiation in BMCR, enable the duplex and
* speed setting, init the timer state machine, and fire it off.
*/
- if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
hp->sw_bmcr = BMCR_SPEED100;
} else {
- if (ethtool_cmd_speed(ep) == SPEED_100)
+ if (ep->base.speed == SPEED_100)
hp->sw_bmcr = BMCR_SPEED100;
else
hp->sw_bmcr = 0;
- if (ep->duplex == DUPLEX_FULL)
+ if (ep->base.duplex == DUPLEX_FULL)
hp->sw_bmcr |= BMCR_FULLDPLX;
}
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1856,7 +1857,7 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
if (status & GREG_STAT_TXLERR)
printk("LateError ");
if (status & GREG_STAT_TXPERR)
- printk("ParityErro ");
+ printk("ParityError ");
if (status & GREG_STAT_TXTERR)
printk("TagBotch ");
printk("]\n");
@@ -1946,7 +1947,7 @@ static void happy_meal_tx(struct happy_meal *hp)
break;
}
hp->tx_skbs[elem] = NULL;
- hp->net_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
dma_addr = hme_read_desc32(hp, &this->tx_addr);
@@ -1963,7 +1964,7 @@ static void happy_meal_tx(struct happy_meal *hp)
}
dev_kfree_skb_irq(skb);
- hp->net_stats.tx_packets++;
+ dev->stats.tx_packets++;
}
hp->tx_old = elem;
TXD((">"));
@@ -2008,17 +2009,17 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
/* Check for errors. */
if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
RXD(("ERR(%08x)]", flags));
- hp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len < ETH_ZLEN)
- hp->net_stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len & (RXFLAG_OVERFLOW >> 16)) {
- hp->net_stats.rx_over_errors++;
- hp->net_stats.rx_fifo_errors++;
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
}
/* Return it to the Happy meal. */
drop_it:
- hp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
dma_addr);
@@ -2083,8 +2084,8 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- hp->net_stats.rx_packets++;
- hp->net_stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
next:
elem = NEXT_RX(elem);
this = &rxbase[elem];
@@ -2395,7 +2396,7 @@ static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
happy_meal_get_counters(hp, hp->bigmacregs);
spin_unlock_irq(&hp->happy_lock);
- return &hp->net_stats;
+ return &dev->stats;
}
static void happy_meal_set_multicast(struct net_device *dev)
@@ -2434,20 +2435,21 @@ static void happy_meal_set_multicast(struct net_device *dev)
}
/* Ethtool support... */
-static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
u32 speed;
+ u32 supported;
- cmd->supported =
+ supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
/* XXX hardcoded stuff for now */
- cmd->port = PORT_TP; /* XXX no MII support */
- cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
- cmd->phy_address = 0; /* XXX fixed PHYAD */
+ cmd->base.port = PORT_TP; /* XXX no MII support */
+ cmd->base.phy_address = 0; /* XXX fixed PHYAD */
/* Record PHY settings. */
spin_lock_irq(&hp->happy_lock);
@@ -2456,41 +2458,45 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_unlock_irq(&hp->happy_lock);
if (hp->sw_bmcr & BMCR_ANENABLE) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
SPEED_100 : SPEED_10);
if (speed == SPEED_100)
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_lpa & (LPA_100FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
else
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_lpa & (LPA_10FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
- ethtool_cmd_speed_set(cmd, speed);
+ cmd->base.speed = speed;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
return 0;
}
-static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
- ((ethtool_cmd_speed(cmd) != SPEED_100 &&
- ethtool_cmd_speed(cmd) != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ ((cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Ok, do it to it. */
@@ -2537,10 +2543,10 @@ static u32 hme_get_link(struct net_device *dev)
}
static const struct ethtool_ops hme_ethtool_ops = {
- .get_settings = hme_get_settings,
- .set_settings = hme_set_settings,
.get_drvinfo = hme_get_drvinfo,
.get_link = hme_get_link,
+ .get_link_ksettings = hme_get_link_ksettings,
+ .set_link_ksettings = hme_set_link_ksettings,
};
static int hme_version_printed;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 4a8d5b18dfd5..3af540adb3c5 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -418,8 +418,6 @@ struct happy_meal {
int rx_new, tx_new, rx_old, tx_old;
- struct net_device_stats net_stats; /* Statistical counters */
-
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
u32 (*read32)(void __iomem *);
void (*write32)(void __iomem *, u32);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 4cc2571f71c6..0b95105f7060 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1,7 +1,7 @@
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -77,11 +77,125 @@ static void vnet_set_msglevel(struct net_device *dev, u32 value)
vp->msg_enable = value;
}
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "rx_packets" },
+ { "tx_packets" },
+ { "rx_bytes" },
+ { "tx_bytes" },
+ { "rx_errors" },
+ { "tx_errors" },
+ { "rx_dropped" },
+ { "tx_dropped" },
+ { "multicast" },
+ { "rx_length_errors" },
+ { "rx_frame_errors" },
+ { "rx_missed_errors" },
+ { "tx_carrier_errors" },
+ { "nports" },
+};
+
+static int vnet_get_sset_count(struct net_device *dev, int sset)
+{
+ struct vnet *vp = (struct vnet *)netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stats_keys)
+ + (NUM_VNET_PORT_STATS * vp->nports);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct vnet *vp = (struct vnet *)netdev_priv(dev);
+ struct vnet_port *port;
+ char *p = (char *)buf;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ p += sizeof(ethtool_stats_keys);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
+ snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
+ port->q_index, port->switch_port ? "s" : "q",
+ port->raddr);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ }
+ rcu_read_unlock();
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void vnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct vnet *vp = (struct vnet *)netdev_priv(dev);
+ struct vnet_port *port;
+ int i = 0;
+
+ data[i++] = dev->stats.rx_packets;
+ data[i++] = dev->stats.tx_packets;
+ data[i++] = dev->stats.rx_bytes;
+ data[i++] = dev->stats.tx_bytes;
+ data[i++] = dev->stats.rx_errors;
+ data[i++] = dev->stats.tx_errors;
+ data[i++] = dev->stats.rx_dropped;
+ data[i++] = dev->stats.tx_dropped;
+ data[i++] = dev->stats.multicast;
+ data[i++] = dev->stats.rx_length_errors;
+ data[i++] = dev->stats.rx_frame_errors;
+ data[i++] = dev->stats.rx_missed_errors;
+ data[i++] = dev->stats.tx_carrier_errors;
+ data[i++] = vp->nports;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
+ data[i++] = port->q_index;
+ data[i++] = port->stats.rx_packets;
+ data[i++] = port->stats.tx_packets;
+ data[i++] = port->stats.rx_bytes;
+ data[i++] = port->stats.tx_bytes;
+ data[i++] = port->stats.event_up;
+ data[i++] = port->stats.event_reset;
+ }
+ rcu_read_unlock();
+}
+
static const struct ethtool_ops vnet_ethtool_ops = {
.get_drvinfo = vnet_get_drvinfo,
.get_msglevel = vnet_get_msglevel,
.set_msglevel = vnet_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_sset_count = vnet_get_sset_count,
+ .get_strings = vnet_get_strings,
+ .get_ethtool_stats = vnet_get_ethtool_stats,
};
static LIST_HEAD(vnet_list);
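
The three new hooks must agree on one count: get_sset_count() sizes both the string table that get_strings() emits and the u64 array that get_ethtool_stats() fills, and sunvnet's count is dynamic (the fixed keys plus NUM_VNET_PORT_STATS per port). A fixed-size toy version of that contract:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static const char toy_keys[][ETH_GSTRING_LEN] = {
            "rx_packets",
            "tx_packets",
    };

    static int toy_sset_count(struct net_device *dev, int sset)
    {
            /* one slot per key; ethtool sizes its buffers from this */
            return sset == ETH_SS_STATS ? ARRAY_SIZE(toy_keys) : -EOPNOTSUPP;
    }

    static void toy_get_strings(struct net_device *dev, u32 sset, u8 *buf)
    {
            if (sset == ETH_SS_STATS)
                    memcpy(buf, toy_keys, sizeof(toy_keys));
    }

    static void toy_get_stats(struct net_device *dev,
                              struct ethtool_stats *estats, u64 *data)
    {
            data[0] = dev->stats.rx_packets;  /* order matches toy_keys */
            data[1] = dev->stats.tx_packets;
    }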
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index fa2d11ca9b81..9e86833249d4 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1,7 +1,7 @@
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#include <linux/module.h>
@@ -43,7 +43,6 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
-static void vnet_port_reset(struct vnet_port *port);
static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
@@ -410,8 +409,12 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+ if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
+ dev->stats.multicast++;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
+ port->stats.rx_packets++;
+ port->stats.rx_bytes += len;
napi_gro_receive(&port->napi, skb);
return 0;
@@ -747,6 +750,13 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
/* RESET takes precedent over any other event */
if (port->rx_event & LDC_EVENT_RESET) {
+ /* a link went down */
+
+ if (port->vsw == 1) {
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ }
+
vio_link_state_change(vio, LDC_EVENT_RESET);
vnet_port_reset(port);
vio_port_up(vio);
@@ -762,12 +772,21 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
maybe_tx_wakeup(port);
port->rx_event = 0;
+ port->stats.event_reset++;
return 0;
}
if (port->rx_event & LDC_EVENT_UP) {
+ /* a link came up */
+
+ if (port->vsw == 1) {
+ netif_carrier_on(port->dev);
+ netif_tx_start_all_queues(port->dev);
+ }
+
vio_link_state_change(vio, LDC_EVENT_UP);
port->rx_event = 0;
+ port->stats.event_up++;
return 0;
}
@@ -1417,6 +1436,8 @@ ldc_start_done:
dev->stats.tx_packets++;
dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
+ port->stats.tx_packets++;
+ port->stats.tx_bytes += port->tx_bufs[txi].skb->len;
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
@@ -1631,7 +1652,7 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
-static void vnet_port_reset(struct vnet_port *port)
+void vnet_port_reset(struct vnet_port *port)
{
del_timer(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
@@ -1639,6 +1660,7 @@ static void vnet_port_reset(struct vnet_port *port)
port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
port->tsolen = 0;
}
+EXPORT_SYMBOL_GPL(vnet_port_reset);
static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
@@ -1708,20 +1730,32 @@ EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
void sunvnet_port_add_txq_common(struct vnet_port *port)
{
struct vnet *vp = port->vp;
- int n;
+ int smallest = 0;
+ int i;
+
+ /* Find the first unused queue, or failing that the least-used one.
+ * When there are more ldoms than queues, we start to double up on
+ * ports per queue.
+ */
+ for (i = 0; i < VNET_MAX_TXQS; i++) {
+ if (vp->q_used[i] == 0) {
+ smallest = i;
+ break;
+ }
+ if (vp->q_used[i] < vp->q_used[smallest])
+ smallest = i;
+ }
- n = vp->nports++;
- n = n & (VNET_MAX_TXQS - 1);
- port->q_index = n;
- netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
- port->q_index));
+ vp->nports++;
+ vp->q_used[smallest]++;
+ port->q_index = smallest;
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
port->vp->nports--;
- netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
- port->q_index));
+ port->vp->q_used[port->q_index]--;
+ port->q_index = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
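
The queue assignment above replaces the old round-robin masking (n & (VNET_MAX_TXQS - 1)), which assumed a power-of-two queue count and never reclaimed a slot when a port was removed. Distilled to its core, and assuming the same q_used[] bookkeeping, the selection is:

    /* Prefer a completely unused slot, otherwise take the current minimum. */
    static int pick_least_used(const u8 *used, int n)
    {
        int best = 0;
        int i;

        for (i = 0; i < n; i++) {
            if (used[i] == 0)
                return i;
            if (used[i] < used[best])
                best = i;
        }
        return best;
    }

Note that sunvnet_port_rm_txq_common() now only drops the usage count; waking and stopping the queues moved to the LDC_EVENT_UP/LDC_EVENT_RESET handlers earlier in this patch.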
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index ce5c824128a3..b20d6fa7ef25 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -35,6 +35,19 @@ struct vnet_tx_entry {
struct vnet;
+struct vnet_port_stats {
+ /* keep them all the same size */
+ u32 rx_bytes;
+ u32 tx_bytes;
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 event_up;
+ u32 event_reset;
+ u32 q_placeholder;
+};
+
+#define NUM_VNET_PORT_STATS (sizeof(struct vnet_port_stats) / sizeof(u32))
+
/* Structure to describe a vnet-port or vsw-port in the MD.
* If the vsw bit is set, this structure represents a vswitch
* port, and the net_device can be found from ->dev. If the
@@ -44,6 +57,8 @@ struct vnet;
struct vnet_port {
struct vio_driver_state vio;
+ struct vnet_port_stats stats;
+
struct hlist_node hash;
u8 raddr[ETH_ALEN];
unsigned switch_port:1;
@@ -97,22 +112,15 @@ struct vnet_mcast_entry {
};
struct vnet {
- /* Protects port_list and port_hash. */
- spinlock_t lock;
-
+ spinlock_t lock; /* Protects port_list and port_hash. */
struct net_device *dev;
-
u32 msg_enable;
-
+ u8 q_used[VNET_MAX_TXQS];
struct list_head port_list;
-
struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
-
struct vnet_mcast_entry *mcast_list;
-
struct list_head list;
u64 local_mac;
-
int nports;
};
@@ -139,6 +147,7 @@ int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
int sunvnet_poll_common(struct napi_struct *napi, int budget);
void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
+void vnet_port_reset(struct vnet_port *port);
bool sunvnet_port_is_up_common(struct vnet_port *vnet);
void sunvnet_port_add_txq_common(struct vnet_port *port);
void sunvnet_port_rm_txq_common(struct vnet_port *port);
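
NUM_VNET_PORT_STATS leans on the "keep them all the same size" rule above: sizeof(struct vnet_port_stats) / sizeof(u32) only counts members correctly while every field is a u32 and the struct is padding-free. It evaluates to 7 here, and q_placeholder reserves the seventh slot for the q_index value that vnet_get_ethtool_stats() emits per port alongside the six counters.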
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..a9503884e1c2
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,41 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+ bool "Synopsys devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Synopsys devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config DWC_XLGMAC
+ tristate "Synopsys DWC Enterprise Ethernet (XLGMAC) driver support"
+ depends on HAS_IOMEM && HAS_DMA
+ select BITREVERSE
+ select CRC32
+ ---help---
+ This driver supports the Synopsys DesignWare Cores Enterprise
+ Ethernet (dwc-xlgmac).
+
+if DWC_XLGMAC
+
+config DWC_XLGMAC_PCI
+ tristate "XLGMAC PCI bus support"
+ depends on DWC_XLGMAC && PCI
+ ---help---
+ This selects the PCI bus support for the dwc-xlgmac driver.
+ This driver was tested on Synopsys XLGMAC IP Prototyping Kit.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
+endif # DWC_XLGMAC
+
+endif # NET_VENDOR_SYNOPSYS
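
With this in place, setting CONFIG_DWC_XLGMAC (plus CONFIG_DWC_XLGMAC_PCI for the PCI glue) under CONFIG_NET_VENDOR_SYNOPSYS builds the driver; both symbols are tristate, so =m for a modular build should work on any configuration satisfying the HAS_IOMEM, HAS_DMA and PCI dependencies.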
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..0ad01916f11e
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_DWC_XLGMAC) += dwc-xlgmac.o
+dwc-xlgmac-objs := dwc-xlgmac-net.o dwc-xlgmac-desc.o \
+ dwc-xlgmac-hw.o dwc-xlgmac-common.o \
+ dwc-xlgmac-ethtool.o
+
+dwc-xlgmac-$(CONFIG_DWC_XLGMAC_PCI) += dwc-xlgmac-pci.o
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
new file mode 100644
index 000000000000..d655a4261e98
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -0,0 +1,737 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
+
+static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+
+ /* Currently it uses a static MAC address for testing */
+ memcpy(pdata->mac_addr, dev_addr, netdev->addr_len);
+}
+
+static void xlgmac_default_config(struct xlgmac_pdata *pdata)
+{
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_128;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_128;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_25000;
+ pdata->sysclk_rate = XLGMAC_SYSCLOCK;
+
+ strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+ strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+}
+
+static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
+{
+ xlgmac_init_desc_ops(&pdata->desc_ops);
+ xlgmac_init_hw_ops(&pdata->hw_ops);
+}
+
+static int xlgmac_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ /* Set default configuration data */
+ xlgmac_default_config(pdata);
+
+ /* Set irq, base_addr and MAC address */
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->mac_regs;
+ xlgmac_read_mac_addr(pdata);
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Set all the function pointers */
+ xlgmac_init_all_ops(pdata);
+
+ /* Issue software reset to device */
+ hw_ops->exit(pdata);
+
+ /* Populate the hardware features */
+ xlgmac_get_all_hw_features(pdata);
+ xlgmac_print_all_hw_features(pdata);
+
+ /* TODO: Set the PHY mode to XLGMII */
+
+ /* Set the DMA mask */
+ ret = dma_set_mask_and_coherent(pdata->dev,
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n");
+ return ret;
+ }
+
+ /* Channel and ring params initialization
+ * pdata->channel_count;
+ * pdata->tx_ring_count;
+ * pdata->rx_ring_count;
+ * pdata->tx_desc_count;
+ * pdata->rx_desc_count;
+ */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT);
+ pdata->tx_desc_count = XLGMAC_TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ return ret;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT);
+ pdata->rx_desc_count = XLGMAC_RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->hw_feat.tx_q_cnt);
+ pdata->tx_q_count = pdata->tx_ring_count;
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count);
+ if (ret) {
+ dev_err(pdata->dev, "error setting real tx queue count\n");
+ return ret;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->hw_feat.rx_q_cnt);
+ pdata->rx_q_count = pdata->rx_ring_count;
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count);
+ if (ret) {
+ dev_err(pdata->dev, "error setting real rx queue count\n");
+ return ret;
+ }
+
+ pdata->channel_count =
+ max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+ for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++)
+ pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+ pdata->rss_table[i],
+ MAC_RSSDR_DMCH_POS,
+ MAC_RSSDR_DMCH_LEN,
+ i % pdata->rx_ring_count);
+
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_IP2TE_POS,
+ MAC_RSSCR_IP2TE_LEN, 1);
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_TCP4TE_POS,
+ MAC_RSSCR_TCP4TE_LEN, 1);
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_UDP4TE_POS,
+ MAC_RSSCR_UDP4TE_LEN, 1);
+
+ /* Set device operations */
+ netdev->netdev_ops = xlgmac_get_netdev_ops();
+ netdev->ethtool_ops = xlgmac_get_ethtool_ops();
+
+ /* Set device features */
+ if (pdata->hw_feat.tso) {
+ netdev->hw_features = NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_SG;
+ netdev->hw_features |= NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ } else if (pdata->hw_feat.tx_coe) {
+ netdev->hw_features = NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ }
+
+ if (pdata->hw_feat.rx_coe) {
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_GRO;
+ }
+
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->vlan_features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (pdata->hw_feat.sa_vlan_ins)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (pdata->hw_feat.vlhash)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+
+ /* Tx coalesce parameters initialization */
+ pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES;
+
+ /* Rx coalesce parameters initialization */
+ pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS;
+ pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES;
+
+ return 0;
+}
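
As a worked example of the clamping above: with 8 online CPUs and hardware advertising 4 Tx DMA channels and 6 Tx queues, tx_ring_count = tx_q_count = min(8, 4, 6) = 4. channel_count is then the larger of the Tx and Rx ring counts, so every active ring in either direction has a channel behind it.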
+
+int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res)
+{
+ struct xlgmac_pdata *pdata;
+ struct net_device *netdev;
+ int ret;
+
+ netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata),
+ XLGMAC_MAX_DMA_CHANNELS);
+
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(netdev, dev);
+ dev_set_drvdata(dev, netdev);
+ pdata = netdev_priv(netdev);
+ pdata->dev = dev;
+ pdata->netdev = netdev;
+
+ pdata->dev_irq = res->irq;
+ pdata->mac_regs = res->addr;
+
+ mutex_init(&pdata->rss_mutex);
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ ret = xlgmac_init(pdata);
+ if (ret) {
+ dev_err(dev, "xlgmac init failed\n");
+ goto err_free_netdev;
+ }
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_free_netdev;
+ }
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return ret;
+}
+
+int xlgmac_drv_remove(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+
+ return 0;
+}
+
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx,
+ unsigned int count,
+ unsigned int flag)
+{
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+
+ while (count--) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+ dma_desc = desc_data->dma_desc;
+
+ netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n",
+ desc_data->dma_desc, &desc_data->dma_desc_addr);
+ netdev_dbg(pdata->netdev,
+ "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(dma_desc->desc0),
+ le32_to_cpu(dma_desc->desc1),
+ le32_to_cpu(dma_desc->desc2),
+ le32_to_cpu(dma_desc->desc3));
+
+ idx++;
+ }
+}
+
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx)
+{
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+ dma_desc = desc_data->dma_desc;
+
+ netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n",
+ desc_data->dma_desc, &desc_data->dma_desc_addr);
+ netdev_dbg(pdata->netdev,
+ "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx,
+ le32_to_cpu(dma_desc->desc0),
+ le32_to_cpu(dma_desc->desc1),
+ le32_to_cpu(dma_desc->desc2),
+ le32_to_cpu(dma_desc->desc3));
+}
+
+void xlgmac_print_pkt(struct net_device *netdev,
+ struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+}
+
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_features *hw_feat = &pdata->hw_feat;
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+
+ mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R);
+ mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R);
+ mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = readl(pdata->mac_regs + MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->phyifsel = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_PHYIFSEL_POS,
+ MAC_HWF0R_PHYIFSEL_LEN);
+ hw_feat->vlhash = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_VLHASH_POS,
+ MAC_HWF0R_VLHASH_LEN);
+ hw_feat->sma = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_SMASEL_POS,
+ MAC_HWF0R_SMASEL_LEN);
+ hw_feat->rwk = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_RWKSEL_POS,
+ MAC_HWF0R_RWKSEL_LEN);
+ hw_feat->mgk = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_MGKSEL_POS,
+ MAC_HWF0R_MGKSEL_LEN);
+ hw_feat->mmc = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_MMCSEL_POS,
+ MAC_HWF0R_MMCSEL_LEN);
+ hw_feat->aoe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_ARPOFFSEL_POS,
+ MAC_HWF0R_ARPOFFSEL_LEN);
+ hw_feat->ts = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TSSEL_POS,
+ MAC_HWF0R_TSSEL_LEN);
+ hw_feat->eee = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_EEESEL_POS,
+ MAC_HWF0R_EEESEL_LEN);
+ hw_feat->tx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TXCOESEL_POS,
+ MAC_HWF0R_TXCOESEL_LEN);
+ hw_feat->rx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_RXCOESEL_POS,
+ MAC_HWF0R_RXCOESEL_LEN);
+ hw_feat->addn_mac = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_ADDMACADRSEL_POS,
+ MAC_HWF0R_ADDMACADRSEL_LEN);
+ hw_feat->ts_src = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TSSTSSEL_POS,
+ MAC_HWF0R_TSSTSSEL_LEN);
+ hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_SAVLANINS_POS,
+ MAC_HWF0R_SAVLANINS_LEN);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_RXFIFOSIZE_POS,
+ MAC_HWF1R_RXFIFOSIZE_LEN);
+ hw_feat->tx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_TXFIFOSIZE_POS,
+ MAC_HWF1R_TXFIFOSIZE_LEN);
+ hw_feat->adv_ts_hi = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_ADVTHWORD_POS,
+ MAC_HWF1R_ADVTHWORD_LEN);
+ hw_feat->dma_width = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_ADDR64_POS,
+ MAC_HWF1R_ADDR64_LEN);
+ hw_feat->dcb = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_DCBEN_POS,
+ MAC_HWF1R_DCBEN_LEN);
+ hw_feat->sph = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_SPHEN_POS,
+ MAC_HWF1R_SPHEN_LEN);
+ hw_feat->tso = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_TSOEN_POS,
+ MAC_HWF1R_TSOEN_LEN);
+ hw_feat->dma_debug = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_DBGMEMA_POS,
+ MAC_HWF1R_DBGMEMA_LEN);
+ hw_feat->rss = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_RSSEN_POS,
+ MAC_HWF1R_RSSEN_LEN);
+ hw_feat->tc_cnt = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_NUMTC_POS,
+ MAC_HWF1R_NUMTC_LEN);
+ hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_HASHTBLSZ_POS,
+ MAC_HWF1R_HASHTBLSZ_LEN);
+ hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_L3L4FNUM_POS,
+ MAC_HWF1R_L3L4FNUM_LEN);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_RXQCNT_POS,
+ MAC_HWF2R_RXQCNT_LEN);
+ hw_feat->tx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_TXQCNT_POS,
+ MAC_HWF2R_TXQCNT_LEN);
+ hw_feat->rx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_RXCHCNT_POS,
+ MAC_HWF2R_RXCHCNT_LEN);
+ hw_feat->tx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_TXCHCNT_POS,
+ MAC_HWF2R_TXCHCNT_LEN);
+ hw_feat->pps_out_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_PPSOUTNUM_POS,
+ MAC_HWF2R_PPSOUTNUM_LEN);
+ hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_AUXSNAPNUM_POS,
+ MAC_HWF2R_AUXSNAPNUM_LEN);
+
+ /* Translate the Hash Table size into the actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into the actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero-based, so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+}
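
XLGMAC_GET_REG_BITS() and its SET counterpart are defined in dwc-xlgmac.h, which is not part of this excerpt; functionally they are the usual mask-and-shift pair, roughly equivalent to this sketch (assuming len < 32):

    static inline u32 demo_get_reg_bits(u32 var, unsigned int pos,
                                        unsigned int len)
    {
        return (var >> pos) & ((1U << len) - 1);
    }

    static inline u32 demo_set_reg_bits(u32 var, unsigned int pos,
                                        unsigned int len, u32 val)
    {
        u32 mask = ((1U << len) - 1) << pos;

        return (var & ~mask) | ((val << pos) & mask);
    }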
+
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
+{
+ char *str = NULL;
+
+ XLGMAC_PR("\n");
+ XLGMAC_PR("=====================================================\n");
+ XLGMAC_PR("\n");
+ XLGMAC_PR("HW support following features\n");
+ XLGMAC_PR("\n");
+ /* HW Feature Register0 */
+ XLGMAC_PR("VLAN Hash Filter Selected : %s\n",
+ pdata->hw_feat.vlhash ? "YES" : "NO");
+ XLGMAC_PR("SMA (MDIO) Interface : %s\n",
+ pdata->hw_feat.sma ? "YES" : "NO");
+ XLGMAC_PR("PMT Remote Wake-up Packet Enable : %s\n",
+ pdata->hw_feat.rwk ? "YES" : "NO");
+ XLGMAC_PR("PMT Magic Packet Enable : %s\n",
+ pdata->hw_feat.mgk ? "YES" : "NO");
+ XLGMAC_PR("RMON/MMC Module Enable : %s\n",
+ pdata->hw_feat.mmc ? "YES" : "NO");
+ XLGMAC_PR("ARP Offload Enabled : %s\n",
+ pdata->hw_feat.aoe ? "YES" : "NO");
+ XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled : %s\n",
+ pdata->hw_feat.ts ? "YES" : "NO");
+ XLGMAC_PR("Energy Efficient Ethernet Enabled : %s\n",
+ pdata->hw_feat.eee ? "YES" : "NO");
+ XLGMAC_PR("Transmit Checksum Offload Enabled : %s\n",
+ pdata->hw_feat.tx_coe ? "YES" : "NO");
+ XLGMAC_PR("Receive Checksum Offload Enabled : %s\n",
+ pdata->hw_feat.rx_coe ? "YES" : "NO");
+ XLGMAC_PR("Additional MAC Addresses 1-31 Selected : %s\n",
+ pdata->hw_feat.addn_mac ? "YES" : "NO");
+
+ switch (pdata->hw_feat.ts_src) {
+ case 0:
+ str = "RESERVED";
+ break;
+ case 1:
+ str = "INTERNAL";
+ break;
+ case 2:
+ str = "EXTERNAL";
+ break;
+ case 3:
+ str = "BOTH";
+ break;
+ }
+ XLGMAC_PR("Timestamp System Time Source : %s\n", str);
+
+ XLGMAC_PR("Source Address or VLAN Insertion Enable : %s\n",
+ pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
+
+ /* HW Feature Register1 */
+ switch (pdata->hw_feat.rx_fifo_size) {
+ case 0:
+ str = "128 bytes";
+ break;
+ case 1:
+ str = "256 bytes";
+ break;
+ case 2:
+ str = "512 bytes";
+ break;
+ case 3:
+ str = "1 KBytes";
+ break;
+ case 4:
+ str = "2 KBytes";
+ break;
+ case 5:
+ str = "4 KBytes";
+ break;
+ case 6:
+ str = "8 KBytes";
+ break;
+ case 7:
+ str = "16 KBytes";
+ break;
+ case 8:
+ str = "32 kBytes";
+ break;
+ case 9:
+ str = "64 KBytes";
+ break;
+ case 10:
+ str = "128 KBytes";
+ break;
+ case 11:
+ str = "256 KBytes";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("MTL Receive FIFO Size : %s\n", str);
+
+ switch (pdata->hw_feat.tx_fifo_size) {
+ case 0:
+ str = "128 bytes";
+ break;
+ case 1:
+ str = "256 bytes";
+ break;
+ case 2:
+ str = "512 bytes";
+ break;
+ case 3:
+ str = "1 KBytes";
+ break;
+ case 4:
+ str = "2 KBytes";
+ break;
+ case 5:
+ str = "4 KBytes";
+ break;
+ case 6:
+ str = "8 KBytes";
+ break;
+ case 7:
+ str = "16 KBytes";
+ break;
+ case 8:
+ str = "32 kBytes";
+ break;
+ case 9:
+ str = "64 KBytes";
+ break;
+ case 10:
+ str = "128 KBytes";
+ break;
+ case 11:
+ str = "256 KBytes";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("MTL Transmit FIFO Size : %s\n", str);
+
+ XLGMAC_PR("IEEE 1588 High Word Register Enable : %s\n",
+ pdata->hw_feat.adv_ts_hi ? "YES" : "NO");
+ XLGMAC_PR("Address width : %u\n",
+ pdata->hw_feat.dma_width);
+ XLGMAC_PR("DCB Feature Enable : %s\n",
+ pdata->hw_feat.dcb ? "YES" : "NO");
+ XLGMAC_PR("Split Header Feature Enable : %s\n",
+ pdata->hw_feat.sph ? "YES" : "NO");
+ XLGMAC_PR("TCP Segmentation Offload Enable : %s\n",
+ pdata->hw_feat.tso ? "YES" : "NO");
+ XLGMAC_PR("DMA Debug Registers Enabled : %s\n",
+ pdata->hw_feat.dma_debug ? "YES" : "NO");
+ XLGMAC_PR("RSS Feature Enabled : %s\n",
+ pdata->hw_feat.rss ? "YES" : "NO");
+ XLGMAC_PR("Number of Traffic classes : %u\n",
+ (pdata->hw_feat.tc_cnt));
+ XLGMAC_PR("Hash Table Size : %u\n",
+ pdata->hw_feat.hash_table_size);
+ XLGMAC_PR("Total number of L3 or L4 Filters : %u\n",
+ pdata->hw_feat.l3l4_filter_num);
+
+ /* HW Feature Register2 */
+ XLGMAC_PR("Number of MTL Receive Queues : %u\n",
+ pdata->hw_feat.rx_q_cnt);
+ XLGMAC_PR("Number of MTL Transmit Queues : %u\n",
+ pdata->hw_feat.tx_q_cnt);
+ XLGMAC_PR("Number of DMA Receive Channels : %u\n",
+ pdata->hw_feat.rx_ch_cnt);
+ XLGMAC_PR("Number of DMA Transmit Channels : %u\n",
+ pdata->hw_feat.tx_ch_cnt);
+
+ switch (pdata->hw_feat.pps_out_num) {
+ case 0:
+ str = "No PPS output";
+ break;
+ case 1:
+ str = "1 PPS output";
+ break;
+ case 2:
+ str = "2 PPS outputs";
+ break;
+ case 3:
+ str = "3 PPS outputs";
+ break;
+ case 4:
+ str = "4 PPS outputs";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("Number of PPS Outputs : %s\n", str);
+
+ switch (pdata->hw_feat.aux_snap_num) {
+ case 0:
+ str = "No auxiliary input";
+ break;
+ case 1:
+ str = "1 auxiliary input";
+ break;
+ case 2:
+ str = "2 auxiliary inputs";
+ break;
+ case 3:
+ str = "3 auxiliary inputs";
+ break;
+ case 4:
+ str = "4 auxiliary inputs";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("Number of Auxiliary Snapshot Inputs : %s", str);
+
+ XLGMAC_PR("\n");
+ XLGMAC_PR("=====================================================\n");
+ XLGMAC_PR("\n");
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
new file mode 100644
index 000000000000..e9672b1f9968
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -0,0 +1,644 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data)
+{
+ if (desc_data->skb_dma) {
+ if (desc_data->mapped_as_page) {
+ dma_unmap_page(pdata->dev, desc_data->skb_dma,
+ desc_data->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, desc_data->skb_dma,
+ desc_data->skb_dma_len, DMA_TO_DEVICE);
+ }
+ desc_data->skb_dma = 0;
+ desc_data->skb_dma_len = 0;
+ }
+
+ if (desc_data->skb) {
+ dev_kfree_skb_any(desc_data->skb);
+ desc_data->skb = NULL;
+ }
+
+ if (desc_data->rx.hdr.pa.pages)
+ put_page(desc_data->rx.hdr.pa.pages);
+
+ if (desc_data->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
+ desc_data->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(desc_data->rx.hdr.pa_unmap.pages);
+ }
+
+ if (desc_data->rx.buf.pa.pages)
+ put_page(desc_data->rx.buf.pa.pages);
+
+ if (desc_data->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
+ desc_data->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(desc_data->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&desc_data->tx, 0, sizeof(desc_data->tx));
+ memset(&desc_data->rx, 0, sizeof(desc_data->rx));
+
+ desc_data->mapped_as_page = 0;
+
+ if (desc_data->state_saved) {
+ desc_data->state_saved = 0;
+ desc_data->state.skb = NULL;
+ desc_data->state.len = 0;
+ desc_data->state.error = 0;
+ }
+}
+
+static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring)
+{
+ struct xlgmac_desc_data *desc_data;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->desc_data_head) {
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+ xlgmac_unmap_desc_data(pdata, desc_data);
+ }
+
+ kfree(ring->desc_data_head);
+ ring->desc_data_head = NULL;
+ }
+
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
+ if (ring->dma_desc_head) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xlgmac_dma_desc) *
+ ring->dma_desc_count),
+ ring->dma_desc_head,
+ ring->dma_desc_head_addr);
+ ring->dma_desc_head = NULL;
+ }
+}
+
+static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int dma_desc_count)
+{
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->dma_desc_count = dma_desc_count;
+ ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xlgmac_dma_desc) *
+ dma_desc_count),
+ &ring->dma_desc_head_addr,
+ GFP_KERNEL);
+ if (!ring->dma_desc_head)
+ return -ENOMEM;
+
+ /* Array of descriptor data */
+ ring->desc_data_head = kcalloc(dma_desc_count,
+ sizeof(struct xlgmac_desc_data),
+ GFP_KERNEL);
+ if (!ring->desc_data_head)
+ return -ENOMEM;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
+ ring->dma_desc_head,
+ &ring->dma_desc_head_addr,
+ ring->desc_data_head);
+
+ return 0;
+}
+
+static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (!pdata->channel_head)
+ return;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xlgmac_free_ring(pdata, channel->tx_ring);
+ xlgmac_free_ring(pdata, channel->rx_ring);
+ }
+}
+
+static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ int ret;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
+ ret = xlgmac_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring");
+ goto err_init_ring;
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
+ ret = xlgmac_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Rx ring\n");
+ goto err_init_ring;
+ }
+ }
+
+ return 0;
+
+err_init_ring:
+ xlgmac_free_rings(pdata);
+
+ return ret;
+}
+
+static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
+{
+ if (!pdata->channel_head)
+ return;
+
+ kfree(pdata->channel_head->tx_ring);
+ pdata->channel_head->tx_ring = NULL;
+
+ kfree(pdata->channel_head->rx_ring);
+ pdata->channel_head->rx_ring = NULL;
+
+ kfree(pdata->channel_head);
+
+ pdata->channel_head = NULL;
+ pdata->channel_count = 0;
+}
+
+static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel_head, *channel;
+ struct xlgmac_ring *tx_ring, *rx_ring;
+ int ret = -ENOMEM;
+ unsigned int i;
+
+ channel_head = kcalloc(pdata->channel_count,
+ sizeof(struct xlgmac_channel), GFP_KERNEL);
+ if (!channel_head)
+ return ret;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "channel_head=%p\n", channel_head);
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_head; i < pdata->channel_count;
+ i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the per DMA interrupt */
+ ret = pdata->channel_irq[i];
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count)
+ channel->tx_ring = tx_ring++;
+
+ if (i < pdata->rx_ring_count)
+ channel->rx_ring = rx_ring++;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
+ channel->name, channel->dma_regs,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_head = channel_head;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_head);
+
+ return ret;
+}
+
+static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+ xlgmac_free_rings(pdata);
+
+ xlgmac_free_channels(pdata);
+}
+
+static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+ int ret;
+
+ ret = xlgmac_alloc_channels(pdata);
+ if (ret)
+ goto err_alloc;
+
+ ret = xlgmac_alloc_rings(pdata);
+ if (ret)
+ goto err_alloc;
+
+ return 0;
+
+err_alloc:
+ xlgmac_free_channels_and_rings(pdata);
+
+ return ret;
+}
+
+static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
+ struct xlgmac_page_alloc *pa,
+ gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
+ put_page(pages);
+ return -ENOMEM;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
+ struct xlgmac_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
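
Concretely, with 4 KiB pages and an order-2 allocation (pages_len = 16 KiB) carved into, say, 2 KiB rx buffers, eight descriptors share one allocation, each slice taking a get_page() reference. The consumer whose slice reaches the end of the allocation trips the (pages_offset + len) > pages_len test, inherits the DMA mapping through pa_unmap, and zeroes the allocator state so the next caller starts a fresh allocation; the dma_unmap_page() and final put_page() then happen exactly once, when that last slice is recycled in xlgmac_unmap_desc_data().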
+
+static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct xlgmac_desc_data *desc_data)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
+ GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
+ GFP_ATOMIC, order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
+ XLGMAC_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ dma_addr_t dma_desc_addr;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ dma_desc = ring->dma_desc_head;
+ dma_desc_addr = ring->dma_desc_head_addr;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+ desc_data->dma_desc = dma_desc;
+ desc_data->dma_desc_addr = dma_desc_addr;
+
+ dma_desc++;
+ dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
+
+ hw_ops->tx_desc_init(channel);
+ }
+}
+
+static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ dma_addr_t dma_desc_addr;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ dma_desc = ring->dma_desc_head;
+ dma_desc_addr = ring->dma_desc_head_addr;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+ desc_data->dma_desc = dma_desc;
+ desc_data->dma_desc_addr = dma_desc_addr;
+
+ if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
+ break;
+
+ dma_desc++;
+ dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+
+ hw_ops->rx_desc_init(channel);
+ }
+}
+
+static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
+ struct sk_buff *skb)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ unsigned int start_index, cur_index;
+ struct xlgmac_desc_data *desc_data;
+ unsigned int offset, datalen, len;
+ struct xlgmac_pkt_info *pkt_info;
+ struct skb_frag_struct *frag;
+ unsigned int tso, vlan;
+ dma_addr_t skb_dma;
+ unsigned int i;
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ pkt_info = &ring->pkt_info;
+ pkt_info->desc_count = 0;
+ pkt_info->length = 0;
+
+ tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
+ (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pkt_info->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = pkt_info->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, pkt_info->header_len);
+
+ offset = pkt_info->header_len;
+
+ pkt_info->length += pkt_info->header_len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ pkt_info->length += len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen,
+ XLGMAC_TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = len;
+ desc_data->mapped_as_page = 1;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ pkt_info->length += len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so desc_data is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
+ desc_data->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ pkt_info->desc_count = cur_index - start_index;
+
+ return pkt_info->desc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
+ xlgmac_unmap_desc_data(pdata, desc_data);
+ }
+
+ return 0;
+}
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
+{
+ desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
+ desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
+ desc_ops->map_tx_skb = xlgmac_map_tx_skb;
+ desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
+ desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
+ desc_ops->tx_desc_init = xlgmac_tx_desc_init;
+ desc_ops->rx_desc_init = xlgmac_rx_desc_init;
+}
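
Callers reach this code through the ops table rather than by direct call; plausible call sites (hypothetical, and note the alloc field keeps the misspelled "channles" name it is registered with here) would be:

    /* during device bring-up */
    ret = pdata->desc_ops.alloc_channles_and_rings(pdata);

    /* in the transmit path */
    desc_count = pdata->desc_ops.map_tx_skb(channel, skb);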
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
new file mode 100644
index 000000000000..fde722136869
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -0,0 +1,275 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+struct xlgmac_stats_desc {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define XLGMAC_STAT(str, var) \
+ { \
+ str, \
+ offsetof(struct xlgmac_pdata, stats.var), \
+ }
+
+static const struct xlgmac_stats_desc xlgmac_gstring_stats[] = {
+ /* MMC TX counters */
+ XLGMAC_STAT("tx_bytes", txoctetcount_gb),
+ XLGMAC_STAT("tx_bytes_good", txoctetcount_g),
+ XLGMAC_STAT("tx_packets", txframecount_gb),
+ XLGMAC_STAT("tx_packets_good", txframecount_g),
+ XLGMAC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XLGMAC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XLGMAC_STAT("tx_broadcast_packets_good", txbroadcastframes_g),
+ XLGMAC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XLGMAC_STAT("tx_multicast_packets_good", txmulticastframes_g),
+ XLGMAC_STAT("tx_vlan_packets_good", txvlanframes_g),
+ XLGMAC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XLGMAC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XLGMAC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XLGMAC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XLGMAC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XLGMAC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XLGMAC_STAT("tx_underflow_errors", txunderflowerror),
+ XLGMAC_STAT("tx_pause_frames", txpauseframes),
+
+ /* MMC RX counters */
+ XLGMAC_STAT("rx_bytes", rxoctetcount_gb),
+ XLGMAC_STAT("rx_bytes_good", rxoctetcount_g),
+ XLGMAC_STAT("rx_packets", rxframecount_gb),
+ XLGMAC_STAT("rx_unicast_packets_good", rxunicastframes_g),
+ XLGMAC_STAT("rx_broadcast_packets_good", rxbroadcastframes_g),
+ XLGMAC_STAT("rx_multicast_packets_good", rxmulticastframes_g),
+ XLGMAC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XLGMAC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XLGMAC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XLGMAC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XLGMAC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XLGMAC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XLGMAC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XLGMAC_STAT("rx_undersize_packets_good", rxundersize_g),
+ XLGMAC_STAT("rx_oversize_packets_good", rxoversize_g),
+ XLGMAC_STAT("rx_crc_errors", rxcrcerror),
+ XLGMAC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XLGMAC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XLGMAC_STAT("rx_length_errors", rxlengtherror),
+ XLGMAC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XLGMAC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XLGMAC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XLGMAC_STAT("rx_pause_frames", rxpauseframes),
+
+ /* Extra counters */
+ XLGMAC_STAT("tx_tso_packets", tx_tso_packets),
+ XLGMAC_STAT("rx_split_header_packets", rx_split_header_packets),
+ XLGMAC_STAT("tx_process_stopped", tx_process_stopped),
+ XLGMAC_STAT("rx_process_stopped", rx_process_stopped),
+ XLGMAC_STAT("tx_buffer_unavailable", tx_buffer_unavailable),
+ XLGMAC_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
+ XLGMAC_STAT("fatal_bus_error", fatal_bus_error),
+ XLGMAC_STAT("tx_vlan_packets", tx_vlan_packets),
+ XLGMAC_STAT("rx_vlan_packets", rx_vlan_packets),
+ XLGMAC_STAT("napi_poll_isr", napi_poll_isr),
+ XLGMAC_STAT("napi_poll_txtimer", napi_poll_txtimer),
+};
+
+#define XLGMAC_STATS_COUNT ARRAY_SIZE(xlgmac_gstring_stats)
+
+static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ u32 ver = pdata->hw_feat.version;
+ u32 snpsver, devid, userver;
+
+ strlcpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ /* S|SNPSVER: Synopsys-defined Version
+ * D|DEVID: Indicates the Device family
+ * U|USERVER: User-defined Version
+ */
+ snpsver = XLGMAC_GET_REG_BITS(ver, MAC_VR_SNPSVER_POS,
+ MAC_VR_SNPSVER_LEN);
+ devid = XLGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS,
+ MAC_VR_DEVID_LEN);
+ userver = XLGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS,
+ MAC_VR_USERVER_LEN);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "S.D.U: %x.%x.%x", snpsver, devid, userver);
+}
+
+static u32 xlgmac_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ return pdata->msg_enable;
+}
+
+static void xlgmac_ethtool_set_msglevel(struct net_device *netdev,
+ u32 msglevel)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ pdata->msg_enable = msglevel;
+}
+
+static void xlgmac_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channel)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ channel->max_rx = XLGMAC_MAX_DMA_CHANNELS;
+ channel->max_tx = XLGMAC_MAX_DMA_CHANNELS;
+ channel->rx_count = pdata->rx_q_count;
+ channel->tx_count = pdata->tx_q_count;
+}
+
+static int xlgmac_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+ ec->rx_coalesce_usecs = pdata->rx_usecs;
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ return 0;
+}
+
+static int xlgmac_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames;
+
+ /* Check for unsupported parameters */
+ if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs) || (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+ (ec->stats_block_coalesce_usecs) || (ec->pkt_rate_low) ||
+ (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ rx_usecs = ec->rx_coalesce_usecs;
+ rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ if ((rx_riwt > XLGMAC_MAX_DMA_RIWT) ||
+ (rx_riwt < XLGMAC_MIN_DMA_RIWT) ||
+ (rx_frames > pdata->rx_desc_count))
+ return -EINVAL;
+
+ if (tx_frames > pdata->tx_desc_count)
+ return -EINVAL;
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_usecs = rx_usecs;
+ pdata->rx_frames = rx_frames;
+ hw_ops->config_rx_coalesce(pdata);
+
+ pdata->tx_frames = tx_frames;
+ hw_ops->config_tx_coalesce(pdata);
+
+ return 0;
+}
+
+static void xlgmac_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XLGMAC_STATS_COUNT; i++) {
+ memcpy(data, xlgmac_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int xlgmac_ethtool_get_sset_count(struct net_device *netdev,
+ int stringset)
+{
+ int ret;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XLGMAC_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static void xlgmac_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ pdata->hw_ops.read_mmc_stats(pdata);
+ for (i = 0; i < XLGMAC_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xlgmac_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+}
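
Each table entry is resolved here as *(u64 *)((u8 *)pdata + stat_offset), which is only safe while every stats field named in XLGMAC_STAT() is a u64 (the stats structure itself lives in dwc-xlgmac.h, outside this excerpt). Running ethtool -S on the interface then prints the values in exactly the table's order, after read_mmc_stats() has refreshed the hardware counters.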
+
+static const struct ethtool_ops xlgmac_ethtool_ops = {
+ .get_drvinfo = xlgmac_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = xlgmac_ethtool_get_msglevel,
+ .set_msglevel = xlgmac_ethtool_set_msglevel,
+ .get_channels = xlgmac_ethtool_get_channels,
+ .get_coalesce = xlgmac_ethtool_get_coalesce,
+ .set_coalesce = xlgmac_ethtool_set_coalesce,
+ .get_strings = xlgmac_ethtool_get_strings,
+ .get_sset_count = xlgmac_ethtool_get_sset_count,
+ .get_ethtool_stats = xlgmac_ethtool_get_ethtool_stats,
+};
+
+const struct ethtool_ops *xlgmac_get_ethtool_ops(void)
+{
+ return &xlgmac_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
new file mode 100644
index 000000000000..458a7844260a
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -0,0 +1,3147 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+#include <linux/dcbnl.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
+{
+ return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN);
+}
+
+static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+ MAC_RCR_IPC_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+ MAC_RCR_IPC_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ return 0;
+}
+
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
+ writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
+
+ return 0;
+}
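
Worked example: for the address 00:11:22:33:44:55, mac_addr_lo packs the first four octets as 0x33221100 and mac_addr_hi the last two as 0x5544, i.e. byte 0 of the address lands in the least significant byte of the low register.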
+
+static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
+ struct netdev_hw_addr *ha,
+ unsigned int *mac_reg)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+
+ if (ha) {
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
+
+ mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
+ MAC_MACA1HR_AE_POS,
+ MAC_MACA1HR_AE_LEN,
+ 1);
+ }
+
+ writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
+ *mac_reg += MAC_MACA_INC;
+ writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
+ *mac_reg += MAC_MACA_INC;
+}
+
+static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ /* Put the VLAN tag in the Rx descriptor */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
+ MAC_VLANTR_EVLRXS_LEN, 1);
+ /* Don't check the VLAN type */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
+ MAC_VLANTR_DOVLTC_LEN, 1);
+ /* Check only C-TAG (0x8100) packets */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
+ MAC_VLANTR_ERSVLM_LEN, 0);
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
+ MAC_VLANTR_ESVL_LEN, 0);
+ /* Enable VLAN tag stripping */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0x3);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ /* Enable VLAN filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+ MAC_PFR_VTFE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ /* Enable VLAN Hash Table filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
+ MAC_VLANTR_VTHM_LEN, 1);
+ /* Disable VLAN tag inverse matching */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
+ MAC_VLANTR_VTIM_LEN, 0);
+ /* Only filter on the lower 12 bits of the VLAN tag */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
+ MAC_VLANTR_ETV_LEN, 1);
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
+ MAC_VLANTR_VL_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ /* Disable VLAN filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+ MAC_PFR_VTFE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ return 0;
+}
+
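+/* Bit-serial, reflected CRC-32 (polynomial 0xedb88320) over the VLAN ID.
+ * get_bitmask_order(VLAN_VID_MASK) is 12, so only the 12 VID bits are
+ * fed through, least significant bit first.
+ */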
+static u32 xlgmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 poly = 0xedb88320;
+ u32 crc = ~0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
+static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
+{
+ u16 vlan_hash_table = 0;
+ __le16 vid_le;
+ u32 regval;
+ u32 crc;
+ u16 vid;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
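+		/* Inverting and bit-reversing the raw CRC, then keeping
+		 * its top four bits (>> 28), yields a bucket index of
+		 * 0..15 into the 16-bit hash table
+		 */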
+
+ vlan_hash_table |= (1 << crc);
+ }
+
+ regval = readl(pdata->mac_regs + MAC_VLANHTR);
+ /* Set the VLAN Hash Table filtering register */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
+ MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
+ writel(regval, pdata->mac_regs + MAC_VLANHTR);
+
+ return 0;
+}
+
+static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+ MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
+ if (regval == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
+ MAC_PFR_PR_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ /* Hardware will still perform VLAN filtering in promiscuous mode */
+ if (enable) {
+ xlgmac_disable_rx_vlan_filtering(pdata);
+ } else {
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xlgmac_enable_rx_vlan_filtering(pdata);
+ }
+
+ return 0;
+}
+
+static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+ MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
+ if (regval == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
+ MAC_PFR_PM_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ return 0;
+}
+
+static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int addn_macs;
+ unsigned int mac_reg;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xlgmac_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xlgmac_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
+{
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int i;
+ u32 crc;
+
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
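+	/* hash_table_size is 64/128/256 entries, giving a shift of
+	 * 26/25/24 so the reflected CRC keeps its top 6/7/8 bits as
+	 * the bit index into the table
+	 */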
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ writel(hash_table[i], pdata->mac_regs + hash_reg);
+ hash_reg += MAC_HTR_INC;
+ }
+}
+
+static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xlgmac_set_mac_hash_table(pdata);
+ else
+ xlgmac_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
+ MAC_PFR_HPF_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
+ MAC_PFR_HUC_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
+ MAC_PFR_HMC_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+ }
+}
+
+static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int val;
+ u32 regval;
+
+ val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
+ MAC_RCR_JE_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xlgmac_enable_rx_csum(pdata);
+ else
+ xlgmac_disable_rx_csum(pdata);
+}
+
+static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANIR);
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
+ MAC_VLANIR_CSVL_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
+ MAC_VLANIR_VLTI_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_VLANIR);
+
+ /* Set the current VLAN Hash Table register value */
+ xlgmac_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xlgmac_enable_rx_vlan_filtering(pdata);
+ else
+ xlgmac_disable_rx_vlan_filtering(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xlgmac_enable_rx_vlan_stripping(pdata);
+ else
+ xlgmac_disable_rx_vlan_stripping(pdata);
+}
+
+static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xlgmac_set_promiscuous_mode(pdata, pr_mode);
+ xlgmac_set_all_multicast_mode(pdata, am_mode);
+
+ xlgmac_add_mac_addresses(pdata);
+
+ return 0;
+}
+
+static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
+ struct xlgmac_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned long tx_timeout;
+ unsigned int tx_status;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
+ DMA_DSRX_TPS_START;
+ }
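+
+	/* e.g. assuming DSR0 covers the first three queues and each DSRx
+	 * register covers four more, queue 5 maps to DSR1 with tx_qidx 2
+	 */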
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = readl(pdata->mac_regs + tx_dsr);
+ tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
+ DMA_DSR_TPS_LEN);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
+static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN,
+ MTL_Q_ENABLED);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Enable MAC Tx */
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+ MAC_TCR_TE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+}
+
+static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xlgmac_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+ MAC_TCR_TE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN, 0);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 0);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+}
+
+static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status, prxq, rxqsts;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, rx_timeout)) {
+ rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
+ prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
+ MTL_Q_RQDR_PRXQ_LEN);
+ rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
+ MTL_Q_RQDR_RXQSTS_LEN);
+ if ((prxq == 0) && (rxqsts == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, rx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int regval, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+ DMA_CH_RCR_SR_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+
+ /* Enable each Rx queue */
+ regval = 0;
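+	/* MAC_RQC0R holds a 2-bit enable field per queue; 0x02 is
+	 * assumed here to be the enabled (DCB/generic) encoding
+	 */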
+ for (i = 0; i < pdata->rx_q_count; i++)
+ regval |= (0x02 << (i << 1));
+ writel(regval, pdata->mac_regs + MAC_RQC0R);
+
+ /* Enable MAC Rx */
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+ MAC_RCR_DCRCC_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+ MAC_RCR_CST_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+ MAC_RCR_ACS_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+ MAC_RCR_RE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Disable MAC Rx */
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+ MAC_RCR_DCRCC_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+ MAC_RCR_CST_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+ MAC_RCR_ACS_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+ MAC_RCR_RE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ xlgmac_prepare_rx_stop(pdata, i);
+
+ /* Disable each Rx queue */
+ writel(0, pdata->mac_regs + MAC_RQC0R);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+ DMA_CH_RCR_SR_LEN, 0);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+}
+
+static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_desc_data *desc_data;
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
+
+ /* Start the Tx timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ unsigned int tso_context, vlan_context;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int csum, tso, vlan;
+ int start_index = ring->cur;
+ int cur_index = ring->cur;
+ unsigned int tx_set_ic;
+ int i;
+
+ pkt_info = &ring->pkt_info;
+ csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
+ tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ if (tso && (pkt_info->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += pkt_info->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (pkt_info->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ pkt_info->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+	/* Create a context descriptor if needed (new TSO MSS or VLAN tag) */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ pkt_info->mss);
+
+ /* Set the MSS size */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_CONTEXT_DESC2_MSS_POS,
+ TX_CONTEXT_DESC2_MSS_LEN,
+ pkt_info->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN,
+ 1);
+
+ /* Indicate this descriptor contains the MSS */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_TCMSSV_POS,
+ TX_CONTEXT_DESC3_TCMSSV_LEN,
+ 1);
+
+ ring->tx.cur_mss = pkt_info->mss;
+ }
+
+ if (vlan_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ pkt_info->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN,
+ 1);
+
+ /* Set the VLAN tag */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_VT_POS,
+ TX_CONTEXT_DESC3_VT_LEN,
+ pkt_info->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_VLTV_POS,
+ TX_CONTEXT_DESC3_VLTV_LEN,
+ 1);
+
+ ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
+ }
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan) {
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_VTIR_POS,
+ TX_NORMAL_DESC2_VTIR_LEN,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ pdata->stats.tx_vlan_packets++;
+ }
+
+ /* Timestamp enablement check */
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_PTP_POS,
+ TX_PACKET_ATTRIBUTES_PTP_LEN))
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_TTSE_POS,
+ TX_NORMAL_DESC2_TTSE_LEN,
+ 1);
+
+ /* Mark it as First Descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_FD_POS,
+ TX_NORMAL_DESC3_FD_LEN,
+ 1);
+
+ /* Mark it as a NORMAL descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN,
+ 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (cur_index != start_index)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN,
+ 1);
+
+ if (tso) {
+ /* Enable TSO */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TSE_POS,
+ TX_NORMAL_DESC3_TSE_LEN, 1);
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TCPPL_POS,
+ TX_NORMAL_DESC3_TCPPL_LEN,
+ pkt_info->tcp_payload_len);
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TCPHDRLEN_POS,
+ TX_NORMAL_DESC3_TCPHDRLEN_LEN,
+ pkt_info->tcp_header_len / 4);
+
+ pdata->stats.tx_tso_packets++;
+ } else {
+ /* Enable CRC and Pad Insertion */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CPC_POS,
+ TX_NORMAL_DESC3_CPC_LEN, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CIC_POS,
+ TX_NORMAL_DESC3_CIC_LEN,
+ 0x3);
+
+ /* Set the total length to be transmitted */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_FL_POS,
+ TX_NORMAL_DESC3_FL_LEN,
+ pkt_info->length);
+ }
+
+ for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+ /* Update buffer address */
+ dma_desc->desc0 =
+ cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 =
+ cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* Set OWN bit */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CIC_POS,
+ TX_NORMAL_DESC3_CIC_LEN,
+ 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_LD_POS,
+ TX_NORMAL_DESC3_LD_LEN, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_IC_POS,
+ TX_NORMAL_DESC2_IC_LEN, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ desc_data->tx.packets = pkt_info->tx_packets;
+ desc_data->tx.bytes = pkt_info->tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ dma_wmb();
+
+ /* Set OWN bit for the first descriptor */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ dma_desc = desc_data->dma_desc;
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN, 1);
+
+ if (netif_msg_tx_queued(pdata))
+ xlgmac_dump_tx_desc(pdata, ring, start_index,
+ pkt_info->desc_count, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ smp_wmb();
+
+ ring->cur = cur_index + 1;
+ if (!pkt_info->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xlgmac_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
+
+ XLGMAC_PR("%s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->dma_desc_count - 1),
+ (ring->cur - 1) & (ring->dma_desc_count - 1));
+}
+
+static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
+ struct xlgmac_dma_desc *dma_desc)
+{
+ u32 tsa, tsd;
+ u64 nsec;
+
+ tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_CONTEXT_DESC3_TSA_POS,
+ RX_CONTEXT_DESC3_TSA_LEN);
+ tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_CONTEXT_DESC3_TSD_POS,
+ RX_CONTEXT_DESC3_TSD_LEN);
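+	/* TSA = timestamp available, TSD = timestamp dropped; an
+	 * all-ones nsec value is discarded as invalid
+	 */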
+ if (tsa && !tsd) {
+ nsec = le32_to_cpu(dma_desc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(dma_desc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ pkt_info->rx_tstamp = nsec;
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
+ RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
+ 1);
+ }
+ }
+}
+
+static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
+{
+ struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+	 * Clear the desc2 control bits (IC, TTSE, B2L & B1L)
+	 * Clear the desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ dma_desc->desc0 = 0;
+ dma_desc->desc1 = 0;
+ dma_desc->desc2 = 0;
+ dma_desc->desc3 = 0;
+
+	/* Make sure the descriptor reset is visible before it is reused */
+ dma_wmb();
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
+{
+ struct xlgmac_ring *ring = channel->tx_ring;
+ struct xlgmac_desc_data *desc_data;
+ int start_index = ring->cur;
+ int i;
+
+	/* Initialize all descriptors */
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+ /* Initialize Tx descriptor */
+ xlgmac_tx_desc_reset(desc_data);
+ }
+
+ /* Update the total number of Tx descriptors */
+ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
+
+ /* Update the starting address of descriptor ring */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ writel(upper_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
+}
+
+static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int index)
+{
+ struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ dma_addr_t hdr_dma, buf_dma;
+ unsigned int inte;
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
+ */
+ hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
+ buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ RX_NORMAL_DESC3_INTE_POS,
+ RX_NORMAL_DESC3_INTE_LEN,
+ inte);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ dma_wmb();
+
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ RX_NORMAL_DESC3_OWN_POS,
+ RX_NORMAL_DESC3_OWN_LEN,
+ 1);
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
+
+static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ unsigned int start_index = ring->cur;
+ struct xlgmac_desc_data *desc_data;
+ unsigned int i;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+ /* Initialize Rx descriptor */
+ xlgmac_rx_desc_reset(pdata, desc_data, i);
+ }
+
+ /* Update the total number of Rx descriptors */
+ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
+
+ /* Update the starting address of descriptor ring */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ writel(upper_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
+ ring->dma_desc_count - 1);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN);
+}
+
+static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_LD_POS,
+ TX_NORMAL_DESC3_LD_LEN);
+}
+
+static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, regval;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+ MTL_Q_RQOMR_EHFC_LEN, 0);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ /* Clear MAC flow control */
+ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ regval = readl(pdata->mac_regs + reg);
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MAC_Q0TFCR_TFE_POS,
+ MAC_Q0TFCR_TFE_LEN,
+ 0);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, regval;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+ MTL_Q_RQOMR_EHFC_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ /* Set MAC flow control */
+ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ regval = readl(pdata->mac_regs + reg);
+
+ /* Enable transmit flow control */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
+ MAC_Q0TFCR_TFE_LEN, 1);
+ /* Set pause time */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
+ MAC_Q0TFCR_PT_LEN, 0xffff);
+
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RFCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+ MAC_RFCR_RFE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RFCR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RFCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+ MAC_RFCR_RFE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RFCR);
+
+ return 0;
+}
+
+static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ if (pdata->tx_pause)
+ xlgmac_enable_tx_flow_control(pdata);
+ else
+ xlgmac_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ if (pdata->rx_pause)
+ xlgmac_enable_rx_flow_control(pdata);
+ else
+ xlgmac_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
+ DMA_CH_RIWT_RWT_LEN,
+ pdata->rx_riwt);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
+{
+ xlgmac_config_tx_flow_control(pdata);
+ xlgmac_config_rx_flow_control(pdata);
+}
+
+static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
+ MTL_Q_RQOMR_FEP_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+}
+
+static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
+ MTL_Q_RQOMR_FUP_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+}
+
+static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
+{
+ return 0;
+}
+
+static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
+ DMA_CH_RCR_RBSZ_LEN,
+ pdata->rx_buf_size);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+}
+
+static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ if (pdata->hw_feat.tso) {
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
+ DMA_CH_TCR_TSE_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+ }
+}
+
+static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
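+	/* Split header (SPH) mode places packet headers in buffer 1 and
+	 * the payload in buffer 2; HDSMS below caps the header split size
+	 */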
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+ DMA_CH_CR_SPH_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ }
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
+ MAC_RCR_HDSMS_LEN,
+ XLGMAC_SPH_HDSMS_SIZE);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ rate = pdata->sysclk_rate;
+
+ /* Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
+ */
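+	/* Worked example (assuming a 125 MHz system clock): 200 usec
+	 * maps to (200 * 125) / 256 = 97 watchdog units
+	 */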
+ ret = (usec * (rate / 1000000)) / 256;
+
+ return ret;
+}
+
+static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ rate = pdata->sysclk_rate;
+
+ /* Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ * ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ return ret;
+}
+
+static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
+ MTL_Q_RQOMR_RTC_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ regval = readl(pdata->mac_regs + MTL_OMR);
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
+ MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
+ writel(regval, pdata->mac_regs + MTL_OMR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
+ MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
+ MTL_TC_QWR_QW_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+ }
+
+ /* Set Rx to strict priority algorithm */
+ regval = readl(pdata->mac_regs + MTL_OMR);
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
+ MTL_OMR_RAA_LEN, MTL_RAA_SP);
+ writel(regval, pdata->mac_regs + MTL_OMR);
+}
+
+static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
+{
+ unsigned int ppq, ppq_extra, prio, prio_queues;
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int reg, regval;
+ unsigned int mask;
+ unsigned int i, j;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ regval = readl(XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MTL_Q_TQOMR_Q2TCMAP_POS,
+ MTL_Q_TQOMR_Q2TCMAP_LEN,
+ i);
+ writel(regval, XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ queue++;
+ }
+
+ if (i < qptc_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ regval = readl(XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MTL_Q_TQOMR_Q2TCMAP_POS,
+ MTL_Q_TQOMR_Q2TCMAP_LEN,
+ i);
+ writel(regval, XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ queue++;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+ pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
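+
+	/* e.g. with three Rx queues: ppq = 2 and ppq_extra = 2, so
+	 * queues 0 and 1 take three priorities each and queue 2 takes
+	 * the remaining two
+	 */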
+
+ reg = MAC_RQC2R;
+ regval = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ prio++;
+ }
+
+ if (i < ppq_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ prio++;
+ }
+
+ regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ writel(regval, pdata->mac_regs + reg);
+ reg += MAC_RQC2_INC;
+ regval = 0;
+ }
+
+	/* Configure one-to-one MTL Rx queue to DMA Rx channel mapping,
+	 * i.e. Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
+ */
+ reg = MTL_RQDCM0R;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
+ MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MTL_RQDCM_INC;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
+ MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MTL_RQDCM_INC;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
+ MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+}
+
+static unsigned int xlgmac_calculate_per_queue_fifo(
+ unsigned int fifo_size,
+ unsigned int queue_count)
+{
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
+
+ /* Calculate the configured fifo size */
+ q_fifo_size = 1 << (fifo_size + 7);
+
+ /* The configured value may not be the actual amount of fifo RAM */
+ q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
+
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Each increment in the queue fifo size represents 256 bytes of
+ * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+ * between the queues.
+ */
+ p_fifo = q_fifo_size / 256;
+ if (p_fifo)
+ p_fifo--;
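+
+	/* e.g. a 64 KB fifo split across 8 queues gives 8192 bytes per
+	 * queue, so p_fifo = 8192 / 256 - 1 = 31
+	 */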
+
+ return p_fifo;
+}
+
+static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int i;
+ u32 regval;
+
+ fifo_size = xlgmac_calculate_per_queue_fifo(
+ pdata->hw_feat.tx_fifo_size,
+ pdata->tx_q_count);
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
+ MTL_Q_TQOMR_TQS_LEN, fifo_size);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int i;
+ u32 regval;
+
+ fifo_size = xlgmac_calculate_per_queue_fifo(
+ pdata->hw_feat.rx_fifo_size,
+ pdata->rx_q_count);
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
+ MTL_Q_RQOMR_RQS_LEN, fifo_size);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+ /* Activate flow control when less than 4k left in fifo */
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
+ MTL_Q_RQFCR_RFA_LEN, 2);
+ /* De-activate flow control when more than 6k left in fifo */
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
+ MTL_Q_RQFCR_RFD_LEN, 4);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+ }
+}
+
+static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
+ MTL_Q_TQOMR_TTC_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
+ MTL_Q_RQOMR_RSF_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
+ MTL_Q_TQOMR_TSF_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
+ DMA_CH_TCR_OSP_LEN,
+ pdata->tx_osp_mode);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
+ DMA_CH_CR_PBLX8_LEN,
+ pdata->pblx8);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
+ regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+ DMA_CH_TCR_PBL_LEN);
+ return regval;
+}
+
+static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+ DMA_CH_TCR_PBL_LEN,
+ pdata->tx_pbl);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
+ regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+ DMA_CH_RCR_PBL_LEN);
+ return regval;
+}
+
+static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+ DMA_CH_RCR_PBL_LEN,
+ pdata->rx_pbl);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+
+ return 0;
+}
+
+static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
+
+ val = (u64)readl(pdata->mac_regs + reg_lo);
+
+ if (read_hi)
+ val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
+
+ return val;
+}
+
+static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
+{
+ unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
+ struct xlgmac_stats *stats = &pdata->stats;
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXOCTETCOUNT_GB_POS,
+ MMC_TISR_TXOCTETCOUNT_GB_LEN))
+ stats->txoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXFRAMECOUNT_GB_POS,
+ MMC_TISR_TXFRAMECOUNT_GB_LEN))
+ stats->txframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXBROADCASTFRAMES_G_POS,
+ MMC_TISR_TXBROADCASTFRAMES_G_LEN))
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXMULTICASTFRAMES_G_POS,
+ MMC_TISR_TXMULTICASTFRAMES_G_LEN))
+ stats->txmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX64OCTETS_GB_POS,
+ MMC_TISR_TX64OCTETS_GB_LEN))
+ stats->tx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX65TO127OCTETS_GB_POS,
+ MMC_TISR_TX65TO127OCTETS_GB_LEN))
+ stats->tx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX128TO255OCTETS_GB_POS,
+ MMC_TISR_TX128TO255OCTETS_GB_LEN))
+ stats->tx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX256TO511OCTETS_GB_POS,
+ MMC_TISR_TX256TO511OCTETS_GB_LEN))
+ stats->tx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX512TO1023OCTETS_GB_POS,
+ MMC_TISR_TX512TO1023OCTETS_GB_LEN))
+ stats->tx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
+ MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
+ stats->tx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXUNICASTFRAMES_GB_POS,
+ MMC_TISR_TXUNICASTFRAMES_GB_LEN))
+ stats->txunicastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXMULTICASTFRAMES_GB_POS,
+ MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
+ stats->txmulticastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXBROADCASTFRAMES_GB_POS,
+ MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXUNDERFLOWERROR_POS,
+ MMC_TISR_TXUNDERFLOWERROR_LEN))
+ stats->txunderflowerror +=
+ xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXOCTETCOUNT_G_POS,
+ MMC_TISR_TXOCTETCOUNT_G_LEN))
+ stats->txoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXFRAMECOUNT_G_POS,
+ MMC_TISR_TXFRAMECOUNT_G_LEN))
+ stats->txframecount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXPAUSEFRAMES_POS,
+ MMC_TISR_TXPAUSEFRAMES_LEN))
+ stats->txpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXVLANFRAMES_G_POS,
+ MMC_TISR_TXVLANFRAMES_G_LEN))
+ stats->txvlanframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
+{
+ unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
+ struct xlgmac_stats *stats = &pdata->stats;
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXFRAMECOUNT_GB_POS,
+ MMC_RISR_RXFRAMECOUNT_GB_LEN))
+ stats->rxframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOCTETCOUNT_GB_POS,
+ MMC_RISR_RXOCTETCOUNT_GB_LEN))
+ stats->rxoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOCTETCOUNT_G_POS,
+ MMC_RISR_RXOCTETCOUNT_G_LEN))
+ stats->rxoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXBROADCASTFRAMES_G_POS,
+ MMC_RISR_RXBROADCASTFRAMES_G_LEN))
+ stats->rxbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXMULTICASTFRAMES_G_POS,
+ MMC_RISR_RXMULTICASTFRAMES_G_LEN))
+ stats->rxmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXCRCERROR_POS,
+ MMC_RISR_RXCRCERROR_LEN))
+ stats->rxcrcerror +=
+ xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXRUNTERROR_POS,
+ MMC_RISR_RXRUNTERROR_LEN))
+ stats->rxrunterror +=
+ xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXJABBERERROR_POS,
+ MMC_RISR_RXJABBERERROR_LEN))
+ stats->rxjabbererror +=
+ xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXUNDERSIZE_G_POS,
+ MMC_RISR_RXUNDERSIZE_G_LEN))
+ stats->rxundersize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOVERSIZE_G_POS,
+ MMC_RISR_RXOVERSIZE_G_LEN))
+ stats->rxoversize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX64OCTETS_GB_POS,
+ MMC_RISR_RX64OCTETS_GB_LEN))
+ stats->rx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX65TO127OCTETS_GB_POS,
+ MMC_RISR_RX65TO127OCTETS_GB_LEN))
+ stats->rx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX128TO255OCTETS_GB_POS,
+ MMC_RISR_RX128TO255OCTETS_GB_LEN))
+ stats->rx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX256TO511OCTETS_GB_POS,
+ MMC_RISR_RX256TO511OCTETS_GB_LEN))
+ stats->rx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX512TO1023OCTETS_GB_POS,
+ MMC_RISR_RX512TO1023OCTETS_GB_LEN))
+ stats->rx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
+ MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
+ stats->rx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXUNICASTFRAMES_G_POS,
+ MMC_RISR_RXUNICASTFRAMES_G_LEN))
+ stats->rxunicastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXLENGTHERROR_POS,
+ MMC_RISR_RXLENGTHERROR_LEN))
+ stats->rxlengtherror +=
+ xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOUTOFRANGETYPE_POS,
+ MMC_RISR_RXOUTOFRANGETYPE_LEN))
+ stats->rxoutofrangetype +=
+ xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXPAUSEFRAMES_POS,
+ MMC_RISR_RXPAUSEFRAMES_LEN))
+ stats->rxpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXFIFOOVERFLOW_POS,
+ MMC_RISR_RXFIFOOVERFLOW_LEN))
+ stats->rxfifooverflow +=
+ xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXVLANFRAMES_GB_POS,
+ MMC_RISR_RXVLANFRAMES_GB_LEN))
+ stats->rxvlanframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXWATCHDOGERROR_POS,
+ MMC_RISR_RXWATCHDOGERROR_LEN))
+ stats->rxwatchdogerror +=
+ xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_stats *stats = &pdata->stats;
+ u32 regval;
+
+ /* Freeze counters */
+ regval = readl(pdata->mac_regs + MMC_CR);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+ MMC_CR_MCF_LEN, 1);
+ writel(regval, pdata->mac_regs + MMC_CR);
+
+ stats->txoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ regval = readl(pdata->mac_regs + MMC_CR);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+ MMC_CR_MCF_LEN, 0);
+ writel(regval, pdata->mac_regs + MMC_CR);
+}
+
+static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MMC_CR);
+ /* Set counters to reset on read */
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
+ MMC_CR_ROR_LEN, 1);
+ /* Reset the counters */
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
+ MMC_CR_CR_LEN, 1);
+ writel(regval, pdata->mac_regs + MMC_CR);
+}
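
The XLGMAC_GET_REG_BITS()/XLGMAC_SET_REG_BITS() accessors used throughout this file come from dwc-xlgmac.h, which is outside this hunk. A minimal sketch of the semantics the code relies on (an assumption, not the header's verbatim definition) is the usual mask-and-shift pair:

	/* Sketch only: extract or replace a 'len'-bit field at bit 'pos' */
	#define XLGMAC_GET_REG_BITS(var, pos, len)			\
		(((var) >> (pos)) & ((0x1 << (len)) - 1))

	#define XLGMAC_SET_REG_BITS(var, pos, len, val)			\
		(((var) & ~(((0x1 << (len)) - 1) << (pos))) |		\
		 (((val) & ((0x1 << (len)) - 1)) << (pos)))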
+
+static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+ u32 regval;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+ MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
+ if (regval) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ writel(val, pdata->mac_regs + MAC_RSSDR);
+
+ regval = readl(pdata->mac_regs + MAC_RSSAR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
+ MAC_RSSAR_RSSIA_LEN, index);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
+ MAC_RSSAR_ADDRT_LEN, type);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
+ MAC_RSSAR_CT_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
+ MAC_RSSAR_OB_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RSSAR);
+
+ wait = 1000;
+ while (wait--) {
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+ MAC_RSSAR_OB_POS,
+ MAC_RSSAR_OB_LEN);
+ if (!regval)
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xlgmac_write_rss_reg(pdata,
+ XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
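
xlgmac_write_rss_lookup_table() pushes out whatever is already in pdata->rss_table, so callers must fill the table first. A hedged sketch of the usual default fill, a round-robin spread over the Rx rings (the helper name below is made up for illustration):

	static void xlgmac_default_rss_table(struct xlgmac_pdata *pdata)
	{
		unsigned int i;

		/* Spread lookup-table entries evenly across the Rx rings */
		for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
			pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
					0, MAC_RSSDR_DMCH_POS,
					MAC_RSSDR_DMCH_LEN,
					i % pdata->rx_ring_count);
	}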
+
+static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xlgmac_write_rss_hash_key(pdata);
+}
+
+static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+ u32 tval;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ tval = table[i];
+ pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+ pdata->rss_table[i],
+ MAC_RSSDR_DMCH_POS,
+ MAC_RSSDR_DMCH_LEN,
+ tval);
+ }
+
+ return xlgmac_write_rss_lookup_table(pdata);
+}
+
+static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xlgmac_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xlgmac_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
+
+ /* Enable RSS */
+ regval = readl(pdata->mac_regs + MAC_RSSCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+ MAC_RSSCR_RSSE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ regval = readl(pdata->mac_regs + MAC_RSSCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+ MAC_RSSCR_RSSE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+ return 0;
+}
+
+static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xlgmac_enable_rss(pdata);
+ else
+ ret = xlgmac_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
+static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int dma_ch_isr, dma_ch_ier;
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_NIE_POS,
+ DMA_CH_IER_NIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_AIE_POS,
+ DMA_CH_IER_AIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ if (!pdata->per_channel_irq)
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN,
+ 1);
+ }
+ if (channel->rx_ring) {
+			/* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN,
+ 1);
+ if (!pdata->per_channel_irq)
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN,
+ 1);
+ }
+
+		writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+ }
+}
+
+static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int q_count, i;
+ unsigned int mtl_q_isr;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+ writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+
+ /* No MTL interrupts to be enabled */
+ writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
+ }
+}
+
+static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int mac_ier = 0;
+ u32 regval;
+
+ /* Enable Timestamp interrupt */
+ mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
+ MAC_IER_TSIE_LEN, 1);
+
+ writel(mac_ier, pdata->mac_regs + MAC_IER);
+
+ /* Enable all counter interrupts */
+ regval = readl(pdata->mac_regs + MMC_RIER);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
+ MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+ writel(regval, pdata->mac_regs + MMC_RIER);
+ regval = readl(pdata->mac_regs + MMC_TIER);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
+ MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+ writel(regval, pdata->mac_regs + MMC_TIER);
+}
+
+static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x1)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x1);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x2)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x2);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x3)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x3);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
+{
+ switch (pdata->phy_speed) {
+ case SPEED_100000:
+ xlgmac_set_xlgmii_100000_speed(pdata);
+ break;
+
+ case SPEED_50000:
+ xlgmac_set_xlgmii_50000_speed(pdata);
+ break;
+
+ case SPEED_40000:
+ xlgmac_set_xlgmii_40000_speed(pdata);
+ break;
+
+ case SPEED_25000:
+ xlgmac_set_xlgmii_25000_speed(pdata);
+ break;
+ }
+}
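
xlgmac_config_mac_speed() above dispatches to four setters that differ only in the MAC_TCR.SS value they program: 0x0 selects 40G, 0x1 25G, 0x2 50G and 0x3 100G operation, as read off the code itself. A sketch of a single parameterized helper (an illustrative refactoring, not part of the patch):

	static void xlgmac_set_xlgmii_speed(struct xlgmac_pdata *pdata, u32 ss)
	{
		u32 regval = readl(pdata->mac_regs + MAC_TCR);

		/* Skip the write when the speed-select field already matches */
		if (XLGMAC_GET_REG_BITS(regval, MAC_TCR_SS_POS,
					MAC_TCR_SS_LEN) == ss)
			return;

		regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
					     MAC_TCR_SS_LEN, ss);
		writel(regval, pdata->mac_regs + MAC_TCR);
	}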
+
+static int xlgmac_dev_read(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int err, etlt, l34t;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ dma_desc = desc_data->dma_desc;
+ pkt_info = &ring->pkt_info;
+
+ /* Check for data availability */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_OWN_POS,
+ RX_NORMAL_DESC3_OWN_LEN))
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ dma_rmb();
+
+ if (netif_msg_rx_status(pdata))
+ xlgmac_dump_rx_desc(pdata, ring, ring->cur);
+
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_CTXT_POS,
+ RX_NORMAL_DESC3_CTXT_LEN)) {
+ /* Timestamp Context Descriptor */
+ xlgmac_get_rx_tstamp(pkt_info, dma_desc);
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+ 1);
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+ 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+ 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_CDA_POS,
+ RX_NORMAL_DESC3_CDA_LEN))
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+ 1);
+
+ /* Get the header length */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_FD_POS,
+ RX_NORMAL_DESC3_FD_LEN)) {
+ desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
+ RX_NORMAL_DESC2_HL_POS,
+ RX_NORMAL_DESC2_HL_LEN);
+ if (desc_data->rx.hdr_len)
+ pdata->stats.rx_split_header_packets++;
+ }
+
+ /* Get the RSS hash */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_RSV_POS,
+ RX_NORMAL_DESC3_RSV_LEN)) {
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
+ 1);
+
+ pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);
+
+ l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_L34T_POS,
+ RX_NORMAL_DESC3_L34T_LEN);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+	/* Get the packet length */
+ desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_PL_POS,
+ RX_NORMAL_DESC3_PL_LEN);
+
+ if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_LD_POS,
+ RX_NORMAL_DESC3_LD_LEN)) {
+ /* Not all the data has been transferred for this pkt_info */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+ 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this pkt_info */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+ 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (netdev->features & NETIF_F_RXCSUM)
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+ 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_ES_POS,
+ RX_NORMAL_DESC3_ES_LEN);
+ etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_ETLT_POS,
+ RX_NORMAL_DESC3_ETLT_LEN);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
+
+	if (!err || !etlt) {
+		/* No error; etlt 0x09 indicates a VLAN-tagged frame */
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+ 1);
+ pkt_info->vlan_ctag =
+ XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
+ RX_NORMAL_DESC0_OVT_POS,
+ RX_NORMAL_DESC0_OVT_LEN);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ pkt_info->vlan_ctag);
+ }
+ } else {
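+		/* etlt 0x05/0x06 appear to flag L3/L4 checksum errors
+		 * (inferred from this handling, not from a databook), so
+		 * only the checksum-done attribute is withdrawn; any other
+		 * error/type value is reported as a frame error.
+		 */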
+ if ((etlt == 0x05) || (etlt == 0x06))
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+ 0);
+ else
+ pkt_info->errors = XLGMAC_SET_REG_BITS(
+ pkt_info->errors,
+ RX_PACKET_ERRORS_FRAME_POS,
+ RX_PACKET_ERRORS_FRAME_LEN,
+ 1);
+ }
+
+ XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->dma_desc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xlgmac_enable_int(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ switch (int_id) {
+ case XLGMAC_INT_DMA_CH_SR_TI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TXSE_POS,
+ DMA_CH_IER_TXSE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TBUE_POS,
+ DMA_CH_IER_TBUE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RSE_POS,
+ DMA_CH_IER_RSE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TI_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_FBE:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_ALL:
+ dma_ch_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ return 0;
+}
+
+static int xlgmac_disable_int(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ switch (int_id) {
+ case XLGMAC_INT_DMA_CH_SR_TI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TXSE_POS,
+ DMA_CH_IER_TXSE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TBUE_POS,
+ DMA_CH_IER_TBUE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RSE_POS,
+ DMA_CH_IER_RSE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TI_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 0);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_FBE:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_ALL:
+ channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
+ dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ return 0;
+}
+
+static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
+{
+ unsigned int i, count;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+ MTL_Q_TQOMR_FTQ_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+	/* Poll until the self-clearing flush (FTQ) bit drops in each queue */
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		count = 2000;
+		do {
+			regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+			regval = XLGMAC_GET_REG_BITS(regval,
+						     MTL_Q_TQOMR_FTQ_POS,
+						     MTL_Q_TQOMR_FTQ_LEN);
+			if (!regval)
+				break;
+
+			usleep_range(500, 600);
+		} while (--count);
+
+		if (!count)
+			return -EBUSY;
+ }
+
+ return 0;
+}
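
The same wait could be expressed with readl_poll_timeout() from <linux/iopoll.h>, which re-reads the register on every iteration by construction; a sketch with roughly the same 2000 x 500us budget (the helper name is made up for illustration):

	#include <linux/iopoll.h>

	static int xlgmac_wait_tx_queue_flushed(struct xlgmac_pdata *pdata,
						unsigned int queue)
	{
		u32 regval;

		/* Poll TQOMR until the self-clearing FTQ bit drops, ~1s max */
		return readl_poll_timeout(XLGMAC_MTL_REG(pdata, queue,
							 MTL_Q_TQOMR),
					  regval,
					  !XLGMAC_GET_REG_BITS(regval,
							MTL_Q_TQOMR_FTQ_POS,
							MTL_Q_TQOMR_FTQ_LEN),
					  500, 1000000);
	}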
+
+static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + DMA_SBMR);
+ /* Set enhanced addressing mode */
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
+ DMA_SBMR_EAME_LEN, 1);
+ /* Set the System Bus mode */
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
+ DMA_SBMR_UNDEF_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
+ DMA_SBMR_BLEN_256_LEN, 1);
+ writel(regval, pdata->mac_regs + DMA_SBMR);
+}
+
+static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ int ret;
+
+ /* Flush Tx queues */
+ ret = xlgmac_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /* Initialize DMA related features */
+ xlgmac_config_dma_bus(pdata);
+ xlgmac_config_osp_mode(pdata);
+ xlgmac_config_pblx8(pdata);
+ xlgmac_config_tx_pbl_val(pdata);
+ xlgmac_config_rx_pbl_val(pdata);
+ xlgmac_config_rx_coalesce(pdata);
+ xlgmac_config_tx_coalesce(pdata);
+ xlgmac_config_rx_buffer_size(pdata);
+ xlgmac_config_tso_mode(pdata);
+ xlgmac_config_sph_mode(pdata);
+ xlgmac_config_rss(pdata);
+ desc_ops->tx_desc_init(pdata);
+ desc_ops->rx_desc_init(pdata);
+ xlgmac_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ xlgmac_config_mtl_mode(pdata);
+ xlgmac_config_queue_mapping(pdata);
+ xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
+ xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
+ xlgmac_config_tx_fifo_size(pdata);
+ xlgmac_config_rx_fifo_size(pdata);
+ xlgmac_config_flow_control_threshold(pdata);
+ xlgmac_config_rx_fep_enable(pdata);
+ xlgmac_config_rx_fup_enable(pdata);
+ xlgmac_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ xlgmac_config_mac_address(pdata);
+ xlgmac_config_rx_mode(pdata);
+ xlgmac_config_jumbo_enable(pdata);
+ xlgmac_config_flow_control(pdata);
+ xlgmac_config_mac_speed(pdata);
+ xlgmac_config_checksum_offload(pdata);
+ xlgmac_config_vlan_support(pdata);
+ xlgmac_config_mmc(pdata);
+ xlgmac_enable_mac_interrupts(pdata);
+
+ return 0;
+}
+
+static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
+{
+ unsigned int count = 2000;
+ u32 regval;
+
+ /* Issue a software reset */
+ regval = readl(pdata->mac_regs + DMA_MR);
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
+ DMA_MR_SWR_LEN, 1);
+ writel(regval, pdata->mac_regs + DMA_MR);
+ usleep_range(10, 15);
+
+	/* Poll until the software reset (SWR) bit self-clears */
+ while (--count &&
+ XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
+ DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
+{
+ hw_ops->init = xlgmac_hw_init;
+ hw_ops->exit = xlgmac_hw_exit;
+
+ hw_ops->tx_complete = xlgmac_tx_complete;
+
+ hw_ops->enable_tx = xlgmac_enable_tx;
+ hw_ops->disable_tx = xlgmac_disable_tx;
+ hw_ops->enable_rx = xlgmac_enable_rx;
+ hw_ops->disable_rx = xlgmac_disable_rx;
+
+ hw_ops->dev_xmit = xlgmac_dev_xmit;
+ hw_ops->dev_read = xlgmac_dev_read;
+ hw_ops->enable_int = xlgmac_enable_int;
+ hw_ops->disable_int = xlgmac_disable_int;
+
+ hw_ops->set_mac_address = xlgmac_set_mac_address;
+ hw_ops->config_rx_mode = xlgmac_config_rx_mode;
+ hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
+ hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;
+
+ /* For MII speed configuration */
+ hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
+ hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
+ hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
+ hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;
+
+ /* For descriptor related operation */
+ hw_ops->tx_desc_init = xlgmac_tx_desc_init;
+ hw_ops->rx_desc_init = xlgmac_rx_desc_init;
+ hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
+ hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
+ hw_ops->is_last_desc = xlgmac_is_last_desc;
+ hw_ops->is_context_desc = xlgmac_is_context_desc;
+ hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;
+
+ /* For Flow Control */
+ hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
+ hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;
+
+ /* For Vlan related config */
+ hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
+ hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
+ hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
+ hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
+ hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;
+
+ /* For RX coalescing */
+ hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
+ hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
+ hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
+ hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
+ hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
+ hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_ops->config_osp_mode = xlgmac_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
+ hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
+ hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
+ hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
+ hw_ops->config_pblx8 = xlgmac_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
+ hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
+ hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;
+
+ /* For Receive Side Scaling */
+ hw_ops->enable_rss = xlgmac_enable_rss;
+ hw_ops->disable_rss = xlgmac_disable_rss;
+ hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
+ hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
new file mode 100644
index 000000000000..3b91257683bc
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -0,0 +1,1350 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_one_poll(struct napi_struct *, int);
+static int xlgmac_all_poll(struct napi_struct *, int);
+
+static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
+{
+ return (ring->dma_desc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
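
Both helpers depend on ring->cur and ring->dirty being free-running unsigned counters, so the subtractions stay correct across 32-bit wraparound: with dirty = 0xfffffffe and cur wrapped around to 5, cur - dirty still yields the 7 descriptors in flight.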
+
+static int xlgmac_maybe_stop_tx_queue(
+ struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring,
+ unsigned int count)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+
+ if (count > xlgmac_tx_avail_desc(ring)) {
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_ops.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
+static void xlgmac_prep_vlan(struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ if (skb_vlan_tag_present(skb))
+ pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xlgmac_prep_tso(struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ int ret;
+
+ if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->tcp_header_len = tcp_hdrlen(skb);
+ pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
+ pkt_info->mss = skb_shinfo(skb)->gso_size;
+
+ XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
+ XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
+ pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
+ XLGMAC_PR("mss=%u\n", pkt_info->mss);
+
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
+ pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
+
+ return 0;
+}
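
A worked example of the accounting above: a 9054-byte skb carrying a 9000-byte TCP payload with a 54-byte Ethernet/IP/TCP header and gso_size 1448 gives gso_segs = 7, so tx_packets becomes 7 and tx_bytes grows by (7 - 1) * 54 = 324, exactly the header bytes replicated onto the six extra wire packets.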
+
+static int xlgmac_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ return 1;
+}
+
+static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ pkt_info->skb = skb;
+
+ context_desc = 0;
+ pkt_info->desc_count = 0;
+
+ pkt_info->tx_packets = 1;
+ pkt_info->tx_bytes = skb->len;
+
+ if (xlgmac_is_tso(skb)) {
+ /* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ pkt_info->desc_count++;
+ }
+
+ /* TSO requires an extra descriptor for TSO header */
+ pkt_info->desc_count++;
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
+ 1);
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+ 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+ 1);
+
+ if (skb_vlan_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ pkt_info->desc_count++;
+ }
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+ 1);
+ }
+
+ for (len = skb_headlen(skb); len;) {
+ pkt_info->desc_count++;
+ len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ pkt_info->desc_count++;
+ len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
+ rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
+ ~(XLGMAC_RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
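
The open-coded round-up is the standard power-of-two alignment idiom, equivalent to the kernel's ALIGN() macro:

	rx_buf_size = ALIGN(rx_buf_size, XLGMAC_RX_BUF_ALIGN);

For the default MTU of 1500 this yields 1500 + 14 + 4 + 4 = 1522 bytes, rounded up to 1536 if XLGMAC_RX_BUF_ALIGN is 64 (the constant is defined in the header, so 64 is an assumption here).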
+
+static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_channel *channel;
+ enum xlgmac_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_ops->enable_int(channel, int_id);
+ }
+}
+
+static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_channel *channel;
+ enum xlgmac_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_ops->disable_int(channel, int_id);
+ }
+}
+
+static irqreturn_t xlgmac_isr(int irq, void *data)
+{
+ unsigned int dma_isr, dma_ch_isr, mac_isr;
+ struct xlgmac_pdata *pdata = data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_hw_ops *hw_ops;
+ unsigned int i, ti, ri;
+
+ hw_ops = &pdata->hw_ops;
+
+	/* The DMA interrupt status register also reports MAC and MTL
+	 * interrupts, so in polling mode it is enough to check that this
+	 * register is non-zero.
+	 */
+ dma_isr = readl(pdata->mac_regs + DMA_ISR);
+ if (!dma_isr)
+ return IRQ_HANDLED;
+
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel_head + i;
+
+ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
+
+ /* The TI or RI interrupt bits may still be set even if using
+ * per channel DMA interrupts. Check to be sure those are not
+ * enabled before using the private data napi structure.
+ */
+ ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
+ DMA_CH_SR_TI_LEN);
+ ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
+ DMA_CH_SR_RI_LEN);
+ if (!pdata->per_channel_irq && (ti || ri)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xlgmac_disable_rx_tx_ints(pdata);
+
+ pdata->stats.napi_poll_isr++;
+ /* Turn on polling */
+ __napi_schedule_irqoff(&pdata->napi);
+ }
+ }
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
+ DMA_CH_SR_TPS_LEN))
+ pdata->stats.tx_process_stopped++;
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
+ DMA_CH_SR_RPS_LEN))
+ pdata->stats.rx_process_stopped++;
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
+ DMA_CH_SR_TBU_LEN))
+ pdata->stats.tx_buffer_unavailable++;
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
+ DMA_CH_SR_RBU_LEN))
+ pdata->stats.rx_buffer_unavailable++;
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
+ DMA_CH_SR_FBE_LEN)) {
+ pdata->stats.fatal_bus_error++;
+ schedule_work(&pdata->restart_work);
+ }
+
+ /* Clear all interrupt signals */
+ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ }
+
+ if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
+ DMA_ISR_MACIS_LEN)) {
+ mac_isr = readl(pdata->mac_regs + MAC_ISR);
+
+ if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
+ MAC_ISR_MMCTXIS_LEN))
+ hw_ops->tx_mmc_int(pdata);
+
+ if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
+ MAC_ISR_MMCRXIS_LEN))
+ hw_ops->rx_mmc_int(pdata);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xlgmac_dma_isr(int irq, void *data)
+{
+ struct xlgmac_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule_irqoff(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void xlgmac_tx_timer(unsigned long data)
+{
+ struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+ disable_irq_nosync(channel->dma_irq);
+ else
+ xlgmac_disable_rx_tx_ints(pdata);
+
+ pdata->stats.napi_poll_txtimer++;
+ /* Turn on polling */
+ __napi_schedule(napi);
+ }
+
+ channel->tx_timer_active = 0;
+}
+
+static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ setup_timer(&channel->tx_timer, xlgmac_tx_timer,
+ (unsigned long)channel);
+ }
+}
+
+static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ del_timer_sync(&channel->tx_timer);
+ }
+}
+
+static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xlgmac_one_poll,
+ NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xlgmac_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
+}
+
+static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
+
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
+}
+
+static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
+ IRQF_SHARED, netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xlgmac_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
+static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ }
+ }
+}
+
+static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ }
+ }
+}
+
+static int xlgmac_start(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ hw_ops->init(pdata);
+ xlgmac_napi_enable(pdata, 1);
+
+ ret = xlgmac_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
+ hw_ops->enable_tx(pdata);
+ hw_ops->enable_rx(pdata);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+
+err_napi:
+ xlgmac_napi_disable(pdata, 1);
+ hw_ops->exit(pdata);
+
+ return ret;
+}
+
+static void xlgmac_stop(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_channel *channel;
+ struct netdev_queue *txq;
+ unsigned int i;
+
+ netif_tx_stop_all_queues(netdev);
+ xlgmac_stop_timers(pdata);
+ hw_ops->disable_tx(pdata);
+ hw_ops->disable_rx(pdata);
+ xlgmac_free_irqs(pdata);
+ xlgmac_napi_disable(pdata, 1);
+ hw_ops->exit(pdata);
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+}
+
+static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
+{
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xlgmac_stop(pdata);
+
+ xlgmac_free_tx_data(pdata);
+ xlgmac_free_rx_data(pdata);
+
+ xlgmac_start(pdata);
+}
+
+static void xlgmac_restart(struct work_struct *work)
+{
+ struct xlgmac_pdata *pdata = container_of(work,
+ struct xlgmac_pdata,
+ restart_work);
+
+ rtnl_lock();
+
+ xlgmac_restart_dev(pdata);
+
+ rtnl_unlock();
+}
+
+static int xlgmac_open(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_desc_ops *desc_ops;
+ int ret;
+
+ desc_ops = &pdata->desc_ops;
+
+ /* TODO: Initialize the phy */
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ return ret;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the channels and rings */
+ ret = desc_ops->alloc_channles_and_rings(pdata);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&pdata->restart_work, xlgmac_restart);
+ xlgmac_init_timers(pdata);
+
+ ret = xlgmac_start(pdata);
+ if (ret)
+ goto err_channels_and_rings;
+
+ return 0;
+
+err_channels_and_rings:
+ desc_ops->free_channels_and_rings(pdata);
+
+ return ret;
+}
+
+static int xlgmac_close(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_desc_ops *desc_ops;
+
+ desc_ops = &pdata->desc_ops;
+
+ /* Stop the device */
+ xlgmac_stop(pdata);
+
+ /* Free the channels and rings */
+ desc_ops->free_channels_and_rings(pdata);
+
+ return 0;
+}
+
+static void xlgmac_tx_timeout(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ netdev_warn(netdev, "tx timeout, device restarting\n");
+ schedule_work(&pdata->restart_work);
+}
+
+static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_pkt_info *tx_pkt_info;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_channel *channel;
+ struct xlgmac_hw_ops *hw_ops;
+ struct netdev_queue *txq;
+ struct xlgmac_ring *ring;
+ int ret;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ XLGMAC_PR("skb->len = %d\n", skb->len);
+
+ channel = pdata->channel_head + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ tx_pkt_info = &ring->pkt_info;
+
+ if (skb->len == 0) {
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Prepare preliminary packet info for TX */
+ memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
+ xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
+
+ /* Check that there are enough descriptors available */
+ ret = xlgmac_maybe_stop_tx_queue(channel, ring,
+ tx_pkt_info->desc_count);
+ if (ret)
+ return ret;
+
+ ret = xlgmac_prep_tso(skb, tx_pkt_info);
+ if (ret) {
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ xlgmac_prep_vlan(skb, tx_pkt_info);
+
+ if (!desc_ops->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
+
+ /* Configure required descriptor fields for transmission */
+ hw_ops->dev_xmit(channel);
+
+ if (netif_msg_pktdata(pdata))
+ xlgmac_print_pkt(netdev, skb, true);
+
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
+
+ return NETDEV_TX_OK;
+}
+
+static void xlgmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_stats *pstats = &pdata->stats;
+
+ pdata->hw_ops.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+}
+
+static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_ops->set_mac_address(pdata, netdev->dev_addr);
+
+ return 0;
+}
+
+static int xlgmac_ioctl(struct net_device *netdev,
+ struct ifreq *ifreq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ int ret;
+
+ ret = xlgmac_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xlgmac_restart_dev(pdata);
+
+ return 0;
+}
+
+static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto,
+ u16 vid)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ set_bit(vid, pdata->active_vlans);
+ hw_ops->update_vlan_hash_table(pdata);
+
+ return 0;
+}
+
+static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto,
+ u16 vid)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_ops->update_vlan_hash_table(pdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xlgmac_poll_controller(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xlgmac_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xlgmac_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int xlgmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_ops->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_ops->disable_rss(pdata);
+ if (ret)
+ return ret;
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ hw_ops->enable_rx_csum(pdata);
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ hw_ops->disable_rx_csum(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_ops->enable_rx_vlan_stripping(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+ hw_ops->disable_rx_vlan_stripping(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_ops->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_ops->disable_rx_vlan_filtering(pdata);
+
+ pdata->netdev_features = features;
+
+ return 0;
+}
+
+static void xlgmac_set_rx_mode(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ hw_ops->config_rx_mode(pdata);
+}
+
+static const struct net_device_ops xlgmac_netdev_ops = {
+ .ndo_open = xlgmac_open,
+ .ndo_stop = xlgmac_close,
+ .ndo_start_xmit = xlgmac_xmit,
+ .ndo_tx_timeout = xlgmac_tx_timeout,
+ .ndo_get_stats64 = xlgmac_get_stats64,
+ .ndo_change_mtu = xlgmac_change_mtu,
+ .ndo_set_mac_address = xlgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xlgmac_ioctl,
+ .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xlgmac_poll_controller,
+#endif
+ .ndo_set_features = xlgmac_set_features,
+ .ndo_set_rx_mode = xlgmac_set_rx_mode,
+};
+
+const struct net_device_ops *xlgmac_get_netdev_ops(void)
+{
+ return &xlgmac_netdev_ops;
+}
+
+static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_hw_ops *hw_ops;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ while (ring->dirty != ring->cur) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset desc_data values */
+ desc_ops->unmap_desc_data(pdata, desc_data);
+
+ if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
+ break;
+
+ hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
+
+ ring->dirty++;
+ }
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Update the Rx Tail Pointer Register with address of
+ * the last cleaned entry
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
+ struct napi_struct *napi,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int len)
+{
+ unsigned int copy_len;
+ struct sk_buff *skb;
+ u8 *packet;
+
+ skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ /* Start with the header buffer which may contain just the header
+ * or the header plus data
+ */
+ dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
+ desc_data->rx.hdr.dma_off,
+ desc_data->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ packet = page_address(desc_data->rx.hdr.pa.pages) +
+ desc_data->rx.hdr.pa.pages_offset;
+ copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
+ copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ len -= copy_len;
+ if (len) {
+ /* Add the remaining data as a frag */
+ dma_sync_single_range_for_cpu(pdata->dev,
+ desc_data->rx.buf.dma_base,
+ desc_data->rx.buf.dma_off,
+ desc_data->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ desc_data->rx.buf.pa.pages,
+ desc_data->rx.buf.pa.pages_offset,
+ len, desc_data->rx.buf.dma_len);
+ desc_data->rx.buf.pa.pages = NULL;
+ }
+
+ return skb;
+}
+
+static int xlgmac_tx_poll(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_hw_ops *hw_ops;
+ struct netdev_queue *txq;
+ int processed = 0;
+ unsigned int cur;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ cur = ring->cur;
+
+ /* Be sure we get ring->cur before accessing descriptor data */
+ smp_rmb();
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+ while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
+ (ring->dirty != cur)) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+ dma_desc = desc_data->dma_desc;
+
+ if (!hw_ops->tx_complete(dma_desc))
+ break;
+
+ /* Make sure descriptor fields are read after reading
+ * the OWN bit
+ */
+ dma_rmb();
+
+ if (netif_msg_tx_done(pdata))
+ xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
+
+ if (hw_ops->is_last_desc(dma_desc)) {
+ tx_packets += desc_data->tx.packets;
+ tx_bytes += desc_data->tx.bytes;
+ }
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ hw_ops->tx_desc_reset(desc_data);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if (!processed)
+ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_tx_wake_queue(txq);
+ }
+
+ XLGMAC_PR("processed=%d\n", processed);
+
+ return processed;
+}
+
+static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int len, dma_desc_len, max_len;
+ unsigned int context_next, context;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int incomplete, error;
+ struct xlgmac_hw_ops *hw_ops;
+ unsigned int received = 0;
+ struct napi_struct *napi;
+ struct sk_buff *skb;
+ int packet_count = 0;
+
+ hw_ops = &pdata->hw_ops;
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ incomplete = 0;
+ context_next = 0;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ pkt_info = &ring->pkt_info;
+ while (packet_count < budget) {
+ /* First time in loop see if we need to restore state */
+ if (!received && desc_data->state_saved) {
+ skb = desc_data->state.skb;
+ error = desc_data->state.error;
+ len = desc_data->state.len;
+ } else {
+ memset(pkt_info, 0, sizeof(*pkt_info));
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+
+ if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
+ xlgmac_rx_refresh(channel);
+
+ if (hw_ops->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+
+ incomplete = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
+ context_next = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
+ context = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
+
+ /* Earlier error, just drain the remaining data */
+ if ((incomplete || context_next) && error)
+ goto read_again;
+
+ if (error || pkt_info->errors) {
+ if (pkt_info->errors)
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (!context) {
+ /* Length is cumulative, get this descriptor's length */
+ dma_desc_len = desc_data->rx.len - len;
+ len += dma_desc_len;
+
+ if (dma_desc_len && !skb) {
+ skb = xlgmac_create_skb(pdata, napi, desc_data,
+ dma_desc_len);
+ if (!skb)
+ error = 1;
+ } else if (dma_desc_len) {
+ dma_sync_single_range_for_cpu(
+ pdata->dev,
+ desc_data->rx.buf.dma_base,
+ desc_data->rx.buf.dma_off,
+ desc_data->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(
+ skb, skb_shinfo(skb)->nr_frags,
+ desc_data->rx.buf.pa.pages,
+ desc_data->rx.buf.pa.pages_offset,
+ dma_desc_len,
+ desc_data->rx.buf.dma_len);
+ desc_data->rx.buf.pa.pages = NULL;
+ }
+ }
+
+ if (incomplete || context_next)
+ goto read_again;
+
+ if (!skb)
+ goto next_packet;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (netif_msg_pktdata(pdata))
+ xlgmac_print_pkt(netdev, skb, false);
+
+ skb_checksum_none_assert(skb);
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ pkt_info->vlan_ctag);
+ pdata->stats.rx_vlan_packets++;
+ }
+
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
+ skb_set_hash(skb, pkt_info->rss_hash,
+ pkt_info->rss_hash_type);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+
+ napi_gro_receive(napi, skb);
+
+next_packet:
+ packet_count++;
+ }
+
+ /* Check if we need to save state before leaving */
+ if (received && (incomplete || context_next)) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ desc_data->state_saved = 1;
+ desc_data->state.skb = skb;
+ desc_data->state.len = len;
+ desc_data->state.error = error;
+ }
+
+ XLGMAC_PR("packet_count = %d\n", packet_count);
+
+ return packet_count;
+}
+
+static int xlgmac_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xlgmac_channel *channel = container_of(napi,
+ struct xlgmac_channel,
+ napi);
+ int processed = 0;
+
+ XLGMAC_PR("budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xlgmac_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xlgmac_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ XLGMAC_PR("received = %d\n", processed);
+
+ return processed;
+}
+
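+/* Services every channel from the single device-level NAPI context,
+ * used when per-channel interrupts are not available (see the napi
+ * selection on pdata->per_channel_irq in xlgmac_rx_poll()).
+ */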
+static int xlgmac_all_poll(struct napi_struct *napi, int budget)
+{
+ struct xlgmac_pdata *pdata = container_of(napi,
+ struct xlgmac_pdata,
+ napi);
+ struct xlgmac_channel *channel;
+ int processed, last_processed;
+ int ring_budget;
+ unsigned int i;
+
+ XLGMAC_PR("budget=%d\n", budget);
+
+ processed = 0;
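+	/* Split the budget evenly across the Rx rings; e.g. a NAPI
+	 * budget of 64 over four rings gives each ring up to 16 packets
+	 * per pass, and the outer loop repeats while budget remains and
+	 * progress is still being made.
+	 */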
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Cleanup Tx ring first */
+ xlgmac_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xlgmac_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+ xlgmac_enable_rx_tx_ints(pdata);
+ }
+
+ XLGMAC_PR("received = %d\n", processed);
+
+ return processed;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
new file mode 100644
index 000000000000..386bafe74c3f
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -0,0 +1,78 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ struct device *dev = &pcidev->dev;
+ struct xlgmac_resources res;
+ int i, ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret) {
+ dev_err(dev, "ERROR: failed to enable device\n");
+ return ret;
+ }
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ if (pci_resource_len(pcidev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
+ if (ret)
+ return ret;
+ break;
+	}
+
+	/* Guard against a device exposing no usable memory BAR; without
+	 * this, 'i' would index past the end of the iomap table below.
+	 */
+	if (i > PCI_STD_RESOURCE_END) {
+		dev_err(dev, "ERROR: no enabled memory BAR found\n");
+		return -ENODEV;
+	}
+
+	pci_set_master(pcidev);
+
+ memset(&res, 0, sizeof(res));
+ res.irq = pcidev->irq;
+ res.addr = pcim_iomap_table(pcidev)[i];
+
+ return xlgmac_drv_probe(&pcidev->dev, &res);
+}
+
+static void xlgmac_remove(struct pci_dev *pcidev)
+{
+ xlgmac_drv_remove(&pcidev->dev);
+}
+
+static const struct pci_device_id xlgmac_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl);
+
+static struct pci_driver xlgmac_pci_driver = {
+ .name = XLGMAC_DRV_NAME,
+ .id_table = xlgmac_pci_tbl,
+ .probe = xlgmac_probe,
+ .remove = xlgmac_remove,
+};
+
+module_pci_driver(xlgmac_pci_driver);
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
new file mode 100644
index 000000000000..3754f220567d
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
@@ -0,0 +1,744 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_REG_H__
+#define __DWC_XLGMAC_REG_H__
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_VR 0x0110
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_POS 18
+#define MAC_HWF0R_ADDMACADRSEL_LEN 5
+#define MAC_HWF0R_ARPOFFSEL_POS 9
+#define MAC_HWF0R_ARPOFFSEL_LEN 1
+#define MAC_HWF0R_EEESEL_POS 13
+#define MAC_HWF0R_EEESEL_LEN 1
+#define MAC_HWF0R_PHYIFSEL_POS 1
+#define MAC_HWF0R_PHYIFSEL_LEN 2
+#define MAC_HWF0R_MGKSEL_POS 7
+#define MAC_HWF0R_MGKSEL_LEN 1
+#define MAC_HWF0R_MMCSEL_POS 8
+#define MAC_HWF0R_MMCSEL_LEN 1
+#define MAC_HWF0R_RWKSEL_POS 6
+#define MAC_HWF0R_RWKSEL_LEN 1
+#define MAC_HWF0R_RXCOESEL_POS 16
+#define MAC_HWF0R_RXCOESEL_LEN 1
+#define MAC_HWF0R_SAVLANINS_POS 27
+#define MAC_HWF0R_SAVLANINS_LEN 1
+#define MAC_HWF0R_SMASEL_POS 5
+#define MAC_HWF0R_SMASEL_LEN 1
+#define MAC_HWF0R_TSSEL_POS 12
+#define MAC_HWF0R_TSSEL_LEN 1
+#define MAC_HWF0R_TSSTSSEL_POS 25
+#define MAC_HWF0R_TSSTSSEL_LEN 2
+#define MAC_HWF0R_TXCOESEL_POS 14
+#define MAC_HWF0R_TXCOESEL_LEN 1
+#define MAC_HWF0R_VLHASH_POS 4
+#define MAC_HWF0R_VLHASH_LEN 1
+#define MAC_HWF1R_ADDR64_POS 14
+#define MAC_HWF1R_ADDR64_LEN 2
+#define MAC_HWF1R_ADVTHWORD_POS 13
+#define MAC_HWF1R_ADVTHWORD_LEN 1
+#define MAC_HWF1R_DBGMEMA_POS 19
+#define MAC_HWF1R_DBGMEMA_LEN 1
+#define MAC_HWF1R_DCBEN_POS 16
+#define MAC_HWF1R_DCBEN_LEN 1
+#define MAC_HWF1R_HASHTBLSZ_POS 24
+#define MAC_HWF1R_HASHTBLSZ_LEN 3
+#define MAC_HWF1R_L3L4FNUM_POS 27
+#define MAC_HWF1R_L3L4FNUM_LEN 4
+#define MAC_HWF1R_NUMTC_POS 21
+#define MAC_HWF1R_NUMTC_LEN 3
+#define MAC_HWF1R_RSSEN_POS 20
+#define MAC_HWF1R_RSSEN_LEN 1
+#define MAC_HWF1R_RXFIFOSIZE_POS 0
+#define MAC_HWF1R_RXFIFOSIZE_LEN 5
+#define MAC_HWF1R_SPHEN_POS 17
+#define MAC_HWF1R_SPHEN_LEN 1
+#define MAC_HWF1R_TSOEN_POS 18
+#define MAC_HWF1R_TSOEN_LEN 1
+#define MAC_HWF1R_TXFIFOSIZE_POS 6
+#define MAC_HWF1R_TXFIFOSIZE_LEN 5
+#define MAC_HWF2R_AUXSNAPNUM_POS 28
+#define MAC_HWF2R_AUXSNAPNUM_LEN 3
+#define MAC_HWF2R_PPSOUTNUM_POS 24
+#define MAC_HWF2R_PPSOUTNUM_LEN 3
+#define MAC_HWF2R_RXCHCNT_POS 12
+#define MAC_HWF2R_RXCHCNT_LEN 4
+#define MAC_HWF2R_RXQCNT_POS 0
+#define MAC_HWF2R_RXQCNT_LEN 4
+#define MAC_HWF2R_TXCHCNT_POS 18
+#define MAC_HWF2R_TXCHCNT_LEN 4
+#define MAC_HWF2R_TXQCNT_POS 6
+#define MAC_HWF2R_TXQCNT_LEN 4
+#define MAC_IER_TSIE_POS 12
+#define MAC_IER_TSIE_LEN 1
+#define MAC_ISR_MMCRXIS_POS 9
+#define MAC_ISR_MMCRXIS_LEN 1
+#define MAC_ISR_MMCTXIS_POS 10
+#define MAC_ISR_MMCTXIS_LEN 1
+#define MAC_ISR_PMTIS_POS 4
+#define MAC_ISR_PMTIS_LEN 1
+#define MAC_ISR_TSIS_POS 12
+#define MAC_ISR_TSIS_LEN 1
+#define MAC_MACA1HR_AE_POS 31
+#define MAC_MACA1HR_AE_LEN 1
+#define MAC_PFR_HMC_POS 2
+#define MAC_PFR_HMC_LEN 1
+#define MAC_PFR_HPF_POS 10
+#define MAC_PFR_HPF_LEN 1
+#define MAC_PFR_HUC_POS 1
+#define MAC_PFR_HUC_LEN 1
+#define MAC_PFR_PM_POS 4
+#define MAC_PFR_PM_LEN 1
+#define MAC_PFR_PR_POS 0
+#define MAC_PFR_PR_LEN 1
+#define MAC_PFR_VTFE_POS 16
+#define MAC_PFR_VTFE_LEN 1
+#define MAC_Q0TFCR_PT_POS 16
+#define MAC_Q0TFCR_PT_LEN 16
+#define MAC_Q0TFCR_TFE_POS 1
+#define MAC_Q0TFCR_TFE_LEN 1
+#define MAC_RCR_ACS_POS 1
+#define MAC_RCR_ACS_LEN 1
+#define MAC_RCR_CST_POS 2
+#define MAC_RCR_CST_LEN 1
+#define MAC_RCR_DCRCC_POS 3
+#define MAC_RCR_DCRCC_LEN 1
+#define MAC_RCR_HDSMS_POS 12
+#define MAC_RCR_HDSMS_LEN 3
+#define MAC_RCR_IPC_POS 9
+#define MAC_RCR_IPC_LEN 1
+#define MAC_RCR_JE_POS 8
+#define MAC_RCR_JE_LEN 1
+#define MAC_RCR_LM_POS 10
+#define MAC_RCR_LM_LEN 1
+#define MAC_RCR_RE_POS 0
+#define MAC_RCR_RE_LEN 1
+#define MAC_RFCR_PFCE_POS 8
+#define MAC_RFCR_PFCE_LEN 1
+#define MAC_RFCR_RFE_POS 0
+#define MAC_RFCR_RFE_LEN 1
+#define MAC_RFCR_UP_POS 1
+#define MAC_RFCR_UP_LEN 1
+#define MAC_RQC0R_RXQ0EN_POS 0
+#define MAC_RQC0R_RXQ0EN_LEN 2
+#define MAC_RSSAR_ADDRT_POS 2
+#define MAC_RSSAR_ADDRT_LEN 1
+#define MAC_RSSAR_CT_POS 1
+#define MAC_RSSAR_CT_LEN 1
+#define MAC_RSSAR_OB_POS 0
+#define MAC_RSSAR_OB_LEN 1
+#define MAC_RSSAR_RSSIA_POS 8
+#define MAC_RSSAR_RSSIA_LEN 8
+#define MAC_RSSCR_IP2TE_POS 1
+#define MAC_RSSCR_IP2TE_LEN 1
+#define MAC_RSSCR_RSSE_POS 0
+#define MAC_RSSCR_RSSE_LEN 1
+#define MAC_RSSCR_TCP4TE_POS 2
+#define MAC_RSSCR_TCP4TE_LEN 1
+#define MAC_RSSCR_UDP4TE_POS 3
+#define MAC_RSSCR_UDP4TE_LEN 1
+#define MAC_RSSDR_DMCH_POS 0
+#define MAC_RSSDR_DMCH_LEN 4
+#define MAC_TCR_SS_POS 28
+#define MAC_TCR_SS_LEN 3
+#define MAC_TCR_TE_POS 0
+#define MAC_TCR_TE_LEN 1
+#define MAC_VLANHTR_VLHT_POS 0
+#define MAC_VLANHTR_VLHT_LEN 16
+#define MAC_VLANIR_VLTI_POS 20
+#define MAC_VLANIR_VLTI_LEN 1
+#define MAC_VLANIR_CSVL_POS 19
+#define MAC_VLANIR_CSVL_LEN 1
+#define MAC_VLANTR_DOVLTC_POS 20
+#define MAC_VLANTR_DOVLTC_LEN 1
+#define MAC_VLANTR_ERSVLM_POS 19
+#define MAC_VLANTR_ERSVLM_LEN 1
+#define MAC_VLANTR_ESVL_POS 18
+#define MAC_VLANTR_ESVL_LEN 1
+#define MAC_VLANTR_ETV_POS 16
+#define MAC_VLANTR_ETV_LEN 1
+#define MAC_VLANTR_EVLS_POS 21
+#define MAC_VLANTR_EVLS_LEN 2
+#define MAC_VLANTR_EVLRXS_POS 24
+#define MAC_VLANTR_EVLRXS_LEN 1
+#define MAC_VLANTR_VL_POS 0
+#define MAC_VLANTR_VL_LEN 16
+#define MAC_VLANTR_VTHM_POS 25
+#define MAC_VLANTR_VTHM_LEN 1
+#define MAC_VLANTR_VTIM_POS 17
+#define MAC_VLANTR_VTIM_LEN 1
+#define MAC_VR_DEVID_POS 8
+#define MAC_VR_DEVID_LEN 8
+#define MAC_VR_SNPSVER_POS 0
+#define MAC_VR_SNPSVER_LEN 8
+#define MAC_VR_USERVER_POS 16
+#define MAC_VR_USERVER_LEN 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_POS 0
+#define MMC_CR_CR_LEN 1
+#define MMC_CR_CSR_POS 1
+#define MMC_CR_CSR_LEN 1
+#define MMC_CR_ROR_POS 2
+#define MMC_CR_ROR_LEN 1
+#define MMC_CR_MCF_POS 3
+#define MMC_CR_MCF_LEN 1
+#define MMC_CR_MCT_POS 4
+#define MMC_CR_MCT_LEN 2
+#define MMC_RIER_ALL_INTERRUPTS_POS 0
+#define MMC_RIER_ALL_INTERRUPTS_LEN 23
+#define MMC_RISR_RXFRAMECOUNT_GB_POS 0
+#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1
+#define MMC_RISR_RXOCTETCOUNT_GB_POS 1
+#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1
+#define MMC_RISR_RXOCTETCOUNT_G_POS 2
+#define MMC_RISR_RXOCTETCOUNT_G_LEN 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1
+#define MMC_RISR_RXCRCERROR_POS 5
+#define MMC_RISR_RXCRCERROR_LEN 1
+#define MMC_RISR_RXRUNTERROR_POS 6
+#define MMC_RISR_RXRUNTERROR_LEN 1
+#define MMC_RISR_RXJABBERERROR_POS 7
+#define MMC_RISR_RXJABBERERROR_LEN 1
+#define MMC_RISR_RXUNDERSIZE_G_POS 8
+#define MMC_RISR_RXUNDERSIZE_G_LEN 1
+#define MMC_RISR_RXOVERSIZE_G_POS 9
+#define MMC_RISR_RXOVERSIZE_G_LEN 1
+#define MMC_RISR_RX64OCTETS_GB_POS 10
+#define MMC_RISR_RX64OCTETS_GB_LEN 1
+#define MMC_RISR_RX65TO127OCTETS_GB_POS 11
+#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1
+#define MMC_RISR_RX128TO255OCTETS_GB_POS 12
+#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1
+#define MMC_RISR_RX256TO511OCTETS_GB_POS 13
+#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_POS 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1
+#define MMC_RISR_RXUNICASTFRAMES_G_POS 16
+#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1
+#define MMC_RISR_RXLENGTHERROR_POS 17
+#define MMC_RISR_RXLENGTHERROR_LEN 1
+#define MMC_RISR_RXOUTOFRANGETYPE_POS 18
+#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1
+#define MMC_RISR_RXPAUSEFRAMES_POS 19
+#define MMC_RISR_RXPAUSEFRAMES_LEN 1
+#define MMC_RISR_RXFIFOOVERFLOW_POS 20
+#define MMC_RISR_RXFIFOOVERFLOW_LEN 1
+#define MMC_RISR_RXVLANFRAMES_GB_POS 21
+#define MMC_RISR_RXVLANFRAMES_GB_LEN 1
+#define MMC_RISR_RXWATCHDOGERROR_POS 22
+#define MMC_RISR_RXWATCHDOGERROR_LEN 1
+#define MMC_TIER_ALL_INTERRUPTS_POS 0
+#define MMC_TIER_ALL_INTERRUPTS_LEN 18
+#define MMC_TISR_TXOCTETCOUNT_GB_POS 0
+#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1
+#define MMC_TISR_TXFRAMECOUNT_GB_POS 1
+#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1
+#define MMC_TISR_TX64OCTETS_GB_POS 4
+#define MMC_TISR_TX64OCTETS_GB_LEN 1
+#define MMC_TISR_TX65TO127OCTETS_GB_POS 5
+#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1
+#define MMC_TISR_TX128TO255OCTETS_GB_POS 6
+#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1
+#define MMC_TISR_TX256TO511OCTETS_GB_POS 7
+#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXUNDERFLOWERROR_POS 13
+#define MMC_TISR_TXUNDERFLOWERROR_LEN 1
+#define MMC_TISR_TXOCTETCOUNT_G_POS 14
+#define MMC_TISR_TXOCTETCOUNT_G_LEN 1
+#define MMC_TISR_TXFRAMECOUNT_G_POS 15
+#define MMC_TISR_TXFRAMECOUNT_G_LEN 1
+#define MMC_TISR_TXPAUSEFRAMES_POS 16
+#define MMC_TISR_TXPAUSEFRAMES_LEN 1
+#define MMC_TISR_TXVLANFRAMES_G_POS 17
+#define MMC_TISR_TXVLANFRAMES_G_LEN 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDDR 0x1010
+#define MTL_RQDCM0R 0x1030
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_POS 5
+#define MTL_OMR_ETSALG_LEN 2
+#define MTL_OMR_RAA_POS 2
+#define MTL_OMR_RAA_LEN 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
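+
+/* For example, queue 2's Tx queue operation mode register sits at
+ * MTL_Q_BASE + 2 * MTL_Q_INC + MTL_Q_TQOMR = 0x1100 + 0x100 + 0x00
+ * = 0x1200; the XLGMAC_MTL_REG() helper at the end of this file
+ * performs this calculation.
+ */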
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_POS 16
+#define MTL_Q_RQDR_PRXQ_LEN 14
+#define MTL_Q_RQDR_RXQSTS_POS 4
+#define MTL_Q_RQDR_RXQSTS_LEN 2
+#define MTL_Q_RQFCR_RFA_POS 1
+#define MTL_Q_RQFCR_RFA_LEN 6
+#define MTL_Q_RQFCR_RFD_POS 17
+#define MTL_Q_RQFCR_RFD_LEN 6
+#define MTL_Q_RQOMR_EHFC_POS 7
+#define MTL_Q_RQOMR_EHFC_LEN 1
+#define MTL_Q_RQOMR_RQS_POS 16
+#define MTL_Q_RQOMR_RQS_LEN 9
+#define MTL_Q_RQOMR_RSF_POS 5
+#define MTL_Q_RQOMR_RSF_LEN 1
+#define MTL_Q_RQOMR_FEP_POS 4
+#define MTL_Q_RQOMR_FEP_LEN 1
+#define MTL_Q_RQOMR_FUP_POS 3
+#define MTL_Q_RQOMR_FUP_LEN 1
+#define MTL_Q_RQOMR_RTC_POS 0
+#define MTL_Q_RQOMR_RTC_LEN 2
+#define MTL_Q_TQOMR_FTQ_POS 0
+#define MTL_Q_TQOMR_FTQ_LEN 1
+#define MTL_Q_TQOMR_Q2TCMAP_POS 8
+#define MTL_Q_TQOMR_Q2TCMAP_LEN 3
+#define MTL_Q_TQOMR_TQS_POS 16
+#define MTL_Q_TQOMR_TQS_LEN 10
+#define MTL_Q_TQOMR_TSF_POS 1
+#define MTL_Q_TQOMR_TSF_LEN 1
+#define MTL_Q_TQOMR_TTC_POS 4
+#define MTL_Q_TQOMR_TTC_LEN 3
+#define MTL_Q_TQOMR_TXQEN_POS 2
+#define MTL_Q_TQOMR_TXQEN_LEN 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+#define MTL_RQDCM0R_Q0MDMACH 0x0
+#define MTL_RQDCM0R_Q1MDMACH 0x00000100
+#define MTL_RQDCM0R_Q2MDMACH 0x00020000
+#define MTL_RQDCM0R_Q3MDMACH 0x03000000
+#define MTL_RQDCM1R_Q4MDMACH 0x00000004
+#define MTL_RQDCM1R_Q5MDMACH 0x00000500
+#define MTL_RQDCM1R_Q6MDMACH 0x00060000
+#define MTL_RQDCM1R_Q7MDMACH 0x07000000
+#define MTL_RQDCM2R_Q8MDMACH 0x00000008
+#define MTL_RQDCM2R_Q9MDMACH 0x00000900
+#define MTL_RQDCM2R_Q10MDMACH 0x000A0000
+#define MTL_RQDCM2R_Q11MDMACH 0x0B000000
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_POS 0
+#define MTL_TC_ETSCR_TSA_LEN 2
+#define MTL_TC_QWR_QW_POS 0
+#define MTL_TC_QWR_QW_LEN 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_POS 17
+#define DMA_ISR_MACIS_LEN 1
+#define DMA_ISR_MTLIS_POS 16
+#define DMA_ISR_MTLIS_LEN 1
+#define DMA_MR_SWR_POS 0
+#define DMA_MR_SWR_LEN 1
+#define DMA_SBMR_EAME_POS 11
+#define DMA_SBMR_EAME_LEN 1
+#define DMA_SBMR_BLEN_64_POS 5
+#define DMA_SBMR_BLEN_64_LEN 1
+#define DMA_SBMR_BLEN_128_POS 6
+#define DMA_SBMR_BLEN_128_LEN 1
+#define DMA_SBMR_BLEN_256_POS 7
+#define DMA_SBMR_BLEN_256_LEN 1
+#define DMA_SBMR_UNDEF_POS 0
+#define DMA_SBMR_UNDEF_LEN 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_LEN 4
+#define DMA_DSR_TPS_LEN 4
+#define DMA_DSR_Q_LEN (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_SR 0x60
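+
+/* For example, channel 1's status register lives at DMA_CH_BASE +
+ * 1 * DMA_CH_INC + DMA_CH_SR = 0x3100 + 0x80 + 0x60 = 0x31e0; the
+ * XLGMAC_DMA_REG() helper below simply adds the register offset to
+ * the channel's dma_regs base.
+ */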
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_POS 16
+#define DMA_CH_CR_PBLX8_LEN 1
+#define DMA_CH_CR_SPH_POS 24
+#define DMA_CH_CR_SPH_LEN 1
+#define DMA_CH_IER_AIE_POS 15
+#define DMA_CH_IER_AIE_LEN 1
+#define DMA_CH_IER_FBEE_POS 12
+#define DMA_CH_IER_FBEE_LEN 1
+#define DMA_CH_IER_NIE_POS 16
+#define DMA_CH_IER_NIE_LEN 1
+#define DMA_CH_IER_RBUE_POS 7
+#define DMA_CH_IER_RBUE_LEN 1
+#define DMA_CH_IER_RIE_POS 6
+#define DMA_CH_IER_RIE_LEN 1
+#define DMA_CH_IER_RSE_POS 8
+#define DMA_CH_IER_RSE_LEN 1
+#define DMA_CH_IER_TBUE_POS 2
+#define DMA_CH_IER_TBUE_LEN 1
+#define DMA_CH_IER_TIE_POS 0
+#define DMA_CH_IER_TIE_LEN 1
+#define DMA_CH_IER_TXSE_POS 1
+#define DMA_CH_IER_TXSE_LEN 1
+#define DMA_CH_RCR_PBL_POS 16
+#define DMA_CH_RCR_PBL_LEN 6
+#define DMA_CH_RCR_RBSZ_POS 1
+#define DMA_CH_RCR_RBSZ_LEN 14
+#define DMA_CH_RCR_SR_POS 0
+#define DMA_CH_RCR_SR_LEN 1
+#define DMA_CH_RIWT_RWT_POS 0
+#define DMA_CH_RIWT_RWT_LEN 8
+#define DMA_CH_SR_FBE_POS 12
+#define DMA_CH_SR_FBE_LEN 1
+#define DMA_CH_SR_RBU_POS 7
+#define DMA_CH_SR_RBU_LEN 1
+#define DMA_CH_SR_RI_POS 6
+#define DMA_CH_SR_RI_LEN 1
+#define DMA_CH_SR_RPS_POS 8
+#define DMA_CH_SR_RPS_LEN 1
+#define DMA_CH_SR_TBU_POS 2
+#define DMA_CH_SR_TBU_LEN 1
+#define DMA_CH_SR_TI_POS 0
+#define DMA_CH_SR_TI_LEN 1
+#define DMA_CH_SR_TPS_POS 1
+#define DMA_CH_SR_TPS_LEN 1
+#define DMA_CH_TCR_OSP_POS 4
+#define DMA_CH_TCR_OSP_LEN 1
+#define DMA_CH_TCR_PBL_POS 16
+#define DMA_CH_TCR_PBL_LEN 6
+#define DMA_CH_TCR_ST_POS 0
+#define DMA_CH_TCR_ST_LEN 1
+#define DMA_CH_TCR_TSE_POS 12
+#define DMA_CH_TCR_TSE_LEN 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64
+#define DMA_PBL_128 128
+#define DMA_PBL_256 256
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_POS 2
+#define RX_PACKET_ERRORS_CRC_LEN 1
+#define RX_PACKET_ERRORS_FRAME_POS 3
+#define RX_PACKET_ERRORS_FRAME_LEN 1
+#define RX_PACKET_ERRORS_LENGTH_POS 0
+#define RX_PACKET_ERRORS_LENGTH_LEN 1
+#define RX_PACKET_ERRORS_OVERRUN_POS 1
+#define RX_PACKET_ERRORS_OVERRUN_LEN 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1
+
+#define RX_NORMAL_DESC0_OVT_POS 0
+#define RX_NORMAL_DESC0_OVT_LEN 16
+#define RX_NORMAL_DESC2_HL_POS 0
+#define RX_NORMAL_DESC2_HL_LEN 10
+#define RX_NORMAL_DESC3_CDA_POS 27
+#define RX_NORMAL_DESC3_CDA_LEN 1
+#define RX_NORMAL_DESC3_CTXT_POS 30
+#define RX_NORMAL_DESC3_CTXT_LEN 1
+#define RX_NORMAL_DESC3_ES_POS 15
+#define RX_NORMAL_DESC3_ES_LEN 1
+#define RX_NORMAL_DESC3_ETLT_POS 16
+#define RX_NORMAL_DESC3_ETLT_LEN 4
+#define RX_NORMAL_DESC3_FD_POS 29
+#define RX_NORMAL_DESC3_FD_LEN 1
+#define RX_NORMAL_DESC3_INTE_POS 30
+#define RX_NORMAL_DESC3_INTE_LEN 1
+#define RX_NORMAL_DESC3_L34T_POS 20
+#define RX_NORMAL_DESC3_L34T_LEN 4
+#define RX_NORMAL_DESC3_LD_POS 28
+#define RX_NORMAL_DESC3_LD_LEN 1
+#define RX_NORMAL_DESC3_OWN_POS 31
+#define RX_NORMAL_DESC3_OWN_LEN 1
+#define RX_NORMAL_DESC3_PL_POS 0
+#define RX_NORMAL_DESC3_PL_LEN 14
+#define RX_NORMAL_DESC3_RSV_POS 26
+#define RX_NORMAL_DESC3_RSV_LEN 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_POS 4
+#define RX_CONTEXT_DESC3_TSA_LEN 1
+#define RX_CONTEXT_DESC3_TSD_POS 6
+#define RX_CONTEXT_DESC3_TSD_LEN 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
+#define TX_PACKET_ATTRIBUTES_PTP_POS 3
+#define TX_PACKET_ATTRIBUTES_PTP_LEN 1
+
+#define TX_CONTEXT_DESC2_MSS_POS 0
+#define TX_CONTEXT_DESC2_MSS_LEN 15
+#define TX_CONTEXT_DESC3_CTXT_POS 30
+#define TX_CONTEXT_DESC3_CTXT_LEN 1
+#define TX_CONTEXT_DESC3_TCMSSV_POS 26
+#define TX_CONTEXT_DESC3_TCMSSV_LEN 1
+#define TX_CONTEXT_DESC3_VLTV_POS 16
+#define TX_CONTEXT_DESC3_VLTV_LEN 1
+#define TX_CONTEXT_DESC3_VT_POS 0
+#define TX_CONTEXT_DESC3_VT_LEN 16
+
+#define TX_NORMAL_DESC2_HL_B1L_POS 0
+#define TX_NORMAL_DESC2_HL_B1L_LEN 14
+#define TX_NORMAL_DESC2_IC_POS 31
+#define TX_NORMAL_DESC2_IC_LEN 1
+#define TX_NORMAL_DESC2_TTSE_POS 30
+#define TX_NORMAL_DESC2_TTSE_LEN 1
+#define TX_NORMAL_DESC2_VTIR_POS 14
+#define TX_NORMAL_DESC2_VTIR_LEN 2
+#define TX_NORMAL_DESC3_CIC_POS 16
+#define TX_NORMAL_DESC3_CIC_LEN 2
+#define TX_NORMAL_DESC3_CPC_POS 26
+#define TX_NORMAL_DESC3_CPC_LEN 2
+#define TX_NORMAL_DESC3_CTXT_POS 30
+#define TX_NORMAL_DESC3_CTXT_LEN 1
+#define TX_NORMAL_DESC3_FD_POS 29
+#define TX_NORMAL_DESC3_FD_LEN 1
+#define TX_NORMAL_DESC3_FL_POS 0
+#define TX_NORMAL_DESC3_FL_LEN 15
+#define TX_NORMAL_DESC3_LD_POS 28
+#define TX_NORMAL_DESC3_LD_LEN 1
+#define TX_NORMAL_DESC3_OWN_POS 31
+#define TX_NORMAL_DESC3_OWN_LEN 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4
+#define TX_NORMAL_DESC3_TCPPL_POS 0
+#define TX_NORMAL_DESC3_TCPPL_LEN 18
+#define TX_NORMAL_DESC3_TSE_POS 18
+#define TX_NORMAL_DESC3_TSE_LEN 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+#define XLGMAC_MTL_REG(pdata, n, reg) \
+ ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg))
+
+#define XLGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg))
+
+#endif /* __DWC_XLGMAC_REG_H__ */
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
new file mode 100644
index 000000000000..cab3e40a86b9
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -0,0 +1,660 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_H__
+#define __DWC_XLGMAC_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/timecounter.h>
+
+#define XLGMAC_DRV_NAME "dwc-xlgmac"
+#define XLGMAC_DRV_VERSION "1.0.0"
+#define XLGMAC_DRV_DESC "Synopsys DWC XLGMAC Driver"
+
+/* Descriptor related parameters */
+#define XLGMAC_TX_DESC_CNT 1024
+#define XLGMAC_TX_DESC_MIN_FREE (XLGMAC_TX_DESC_CNT >> 3)
+#define XLGMAC_TX_DESC_MAX_PROC (XLGMAC_TX_DESC_CNT >> 1)
+#define XLGMAC_RX_DESC_CNT 1024
+#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for a SKB */
+#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
+
+#define XLGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define XLGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XLGMAC_RX_BUF_ALIGN 64
+
+/* Maximum Size for Splitting the Header Data
+ * Keep in sync with XLGMAC_SKB_ALLOC_SIZE
+ * 3'b000: 64 bytes, 3'b001: 128 bytes
+ * 3'b010: 256 bytes, 3'b011: 512 bytes
+ * 3'b100: 1023 bytes, 3'b101-3'b111: Reserved
+ */
+#define XLGMAC_SPH_HDSMS_SIZE 3
+#define XLGMAC_SKB_ALLOC_SIZE 512
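+
+/* XLGMAC_SPH_HDSMS_SIZE of 3 (3'b011) selects a 512-byte header split
+ * per the encoding above, which is why XLGMAC_SKB_ALLOC_SIZE is 512.
+ */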
+
+#define XLGMAC_MAX_FIFO 81920
+
+#define XLGMAC_MAX_DMA_CHANNELS 16
+#define XLGMAC_DMA_STOP_TIMEOUT 5
+#define XLGMAC_DMA_INTERRUPT_MASK 0x31c7
+
+/* Default coalescing parameters */
+#define XLGMAC_INIT_DMA_TX_USECS 1000
+#define XLGMAC_INIT_DMA_TX_FRAMES 25
+#define XLGMAC_INIT_DMA_RX_USECS 30
+#define XLGMAC_INIT_DMA_RX_FRAMES 25
+#define XLGMAC_MAX_DMA_RIWT 0xff
+#define XLGMAC_MIN_DMA_RIWT 0x01
+
+/* Flow control queue count */
+#define XLGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* System clock is 125 MHz */
+#define XLGMAC_SYSCLOCK 125000000
+
+/* Maximum MAC address hash table size (256 bits, i.e. eight 32-bit registers) */
+#define XLGMAC_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define XLGMAC_RSS_HASH_KEY_SIZE 40
+#define XLGMAC_RSS_MAX_TABLE_SIZE 256
+#define XLGMAC_RSS_LOOKUP_TABLE_TYPE 0
+#define XLGMAC_RSS_HASH_KEY_TYPE 1
+
+#define XLGMAC_STD_PACKET_MTU 1500
+#define XLGMAC_JUMBO_PACKET_MTU 9000
+
+/* Helper macro for descriptor handling
+ * Always use XLGMAC_GET_DESC_DATA to access the descriptor data
+ */
+#define XLGMAC_GET_DESC_DATA(ring, idx) ({ \
+ typeof(ring) _ring = (ring); \
+ ((_ring)->desc_data_head + \
+ ((idx) & ((_ring)->dma_desc_count - 1))); \
+})
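+
+/* Since dma_desc_count is a power of two, the index is masked rather
+ * than range-checked; e.g. on a 1024-entry ring, an index of 1025
+ * resolves to desc_data_head + 1, so ring->cur and ring->dirty can
+ * increment without ever being wrapped explicitly.
+ */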
+
+#define XLGMAC_GET_REG_BITS(var, pos, len) ({ \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define XLGMAC_GET_REG_BITS_LE(var, pos, len) ({ \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(var) _var = le32_to_cpu((var)); \
+ ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define XLGMAC_SET_REG_BITS(var, pos, len, val) ({ \
+ typeof(var) _var = (var); \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(val) _val = (val); \
+ _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+ _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+})
+
+#define XLGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \
+ typeof(var) _var = (var); \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(val) _val = (val); \
+ _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+ _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+ cpu_to_le32(_var); \
+})
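+
+/* Usage sketch (illustrative): testing and then claiming descriptor
+ * ownership with the little-endian accessors above, using the field
+ * definitions from dwc-xlgmac-reg.h:
+ *
+ *	own = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ *				     TX_NORMAL_DESC3_OWN_POS,
+ *				     TX_NORMAL_DESC3_OWN_LEN);
+ *	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(dma_desc->desc3,
+ *					TX_NORMAL_DESC3_OWN_POS,
+ *					TX_NORMAL_DESC3_OWN_LEN, 1);
+ */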
+
+struct xlgmac_pdata;
+
+enum xlgmac_int {
+ XLGMAC_INT_DMA_CH_SR_TI,
+ XLGMAC_INT_DMA_CH_SR_TPS,
+ XLGMAC_INT_DMA_CH_SR_TBU,
+ XLGMAC_INT_DMA_CH_SR_RI,
+ XLGMAC_INT_DMA_CH_SR_RBU,
+ XLGMAC_INT_DMA_CH_SR_RPS,
+ XLGMAC_INT_DMA_CH_SR_TI_RI,
+ XLGMAC_INT_DMA_CH_SR_FBE,
+ XLGMAC_INT_DMA_ALL,
+};
+
+struct xlgmac_stats {
+ /* MMC TX counters */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* MMC RX counters */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+
+ /* Extra counters */
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+ u64 tx_process_stopped;
+ u64 rx_process_stopped;
+ u64 tx_buffer_unavailable;
+ u64 rx_buffer_unavailable;
+ u64 fatal_bus_error;
+ u64 tx_vlan_packets;
+ u64 rx_vlan_packets;
+ u64 napi_poll_isr;
+ u64 napi_poll_txtimer;
+};
+
+struct xlgmac_ring_buf {
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ unsigned int skb_len;
+};
+
+/* Common Tx and Rx DMA hardware descriptor */
+struct xlgmac_dma_desc {
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xlgmac_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xlgmac_buffer_data {
+ struct xlgmac_page_alloc pa;
+ struct xlgmac_page_alloc pa_unmap;
+
+ dma_addr_t dma_base;
+ unsigned long dma_off;
+ unsigned int dma_len;
+};
+
+/* Tx-related desc data */
+struct xlgmac_tx_desc_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related desc data */
+struct xlgmac_rx_desc_data {
+ struct xlgmac_buffer_data hdr; /* Header locations */
+ struct xlgmac_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
+};
+
+struct xlgmac_pkt_info {
+ struct sk_buff *skb;
+
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ /* descriptors needed for this packet */
+ unsigned int desc_count;
+ unsigned int length;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+};
+
+struct xlgmac_desc_data {
+ /* dma_desc: Virtual address of descriptor
+ * dma_desc_addr: DMA address of descriptor
+ */
+ struct xlgmac_dma_desc *dma_desc;
+ dma_addr_t dma_desc_addr;
+
+ /* skb: Virtual address of SKB
+ * skb_dma: DMA address of SKB data
+ * skb_dma_len: Length of SKB DMA area
+ */
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ unsigned int skb_dma_len;
+
+ /* Tx/Rx -related data */
+ struct xlgmac_tx_desc_data tx;
+ struct xlgmac_rx_desc_data rx;
+
+ unsigned int mapped_as_page;
+
+	/* Incomplete receive save location. If the budget is exhausted
+	 * or the last descriptor (the last normal descriptor or a
+	 * following context descriptor) has not been DMA'd yet, the
+	 * current state of the receive processing needs to be saved.
+	 */
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
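+	/* (xlgmac_rx_poll() saves the partial packet here when it has
+	 * to stop mid-packet, and restores it at the start of its next
+	 * pass.)
+	 */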
+};
+
+struct xlgmac_ring {
+ /* Per packet related information */
+ struct xlgmac_pkt_info pkt_info;
+
+ /* Virtual/DMA addresses of DMA descriptor list and the total count */
+ struct xlgmac_dma_desc *dma_desc_head;
+ dma_addr_t dma_desc_head_addr;
+ unsigned int dma_desc_count;
+
+	/* Array of descriptor data corresponding to the DMA descriptors
+	 * (always use the XLGMAC_GET_DESC_DATA macro to access this data)
+	 */
+ struct xlgmac_desc_data *desc_data_head;
+
+ /* Page allocation for RX buffers */
+ struct xlgmac_page_alloc rx_hdr_pa;
+ struct xlgmac_page_alloc rx_buf_pa;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
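+
+	/* Both indices increase monotonically; the difference
+	 * (cur - dirty) is the number of descriptors awaiting
+	 * completion (Tx) or re-allocation (Rx), with the wraparound
+	 * handled by the masking in XLGMAC_GET_DESC_DATA().
+	 */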
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int xmit_more;
+ unsigned int queue_stopped;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+ };
+} ____cacheline_aligned;
+
+struct xlgmac_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xlgmac_pdata *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+	/* Per-channel IRQ number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct timer_list tx_timer;
+
+ struct xlgmac_ring *tx_ring;
+ struct xlgmac_ring *rx_ring;
+} ____cacheline_aligned;
+
+struct xlgmac_desc_ops {
+ int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata);
+ void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
+ int (*map_tx_skb)(struct xlgmac_channel *channel,
+ struct sk_buff *skb);
+ int (*map_rx_buffer)(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct xlgmac_desc_data *desc_data);
+ void (*unmap_desc_data)(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data);
+ void (*tx_desc_init)(struct xlgmac_pdata *pdata);
+ void (*rx_desc_init)(struct xlgmac_pdata *pdata);
+};
+
+struct xlgmac_hw_ops {
+ int (*init)(struct xlgmac_pdata *pdata);
+ int (*exit)(struct xlgmac_pdata *pdata);
+
+ int (*tx_complete)(struct xlgmac_dma_desc *dma_desc);
+
+ void (*enable_tx)(struct xlgmac_pdata *pdata);
+ void (*disable_tx)(struct xlgmac_pdata *pdata);
+ void (*enable_rx)(struct xlgmac_pdata *pdata);
+ void (*disable_rx)(struct xlgmac_pdata *pdata);
+
+ int (*enable_int)(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id);
+ int (*disable_int)(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id);
+ void (*dev_xmit)(struct xlgmac_channel *channel);
+ int (*dev_read)(struct xlgmac_channel *channel);
+
+ int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+ int (*config_rx_mode)(struct xlgmac_pdata *pdata);
+ int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
+
+ /* For MII speed configuration */
+ int (*set_xlgmii_25000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_40000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_50000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_100000_speed)(struct xlgmac_pdata *pdata);
+
+ /* For descriptor related operation */
+ void (*tx_desc_init)(struct xlgmac_channel *channel);
+ void (*rx_desc_init)(struct xlgmac_channel *channel);
+ void (*tx_desc_reset)(struct xlgmac_desc_data *desc_data);
+ void (*rx_desc_reset)(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int index);
+ int (*is_last_desc)(struct xlgmac_dma_desc *dma_desc);
+ int (*is_context_desc)(struct xlgmac_dma_desc *dma_desc);
+ void (*tx_start_xmit)(struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring);
+
+ /* For Flow Control */
+ int (*config_tx_flow_control)(struct xlgmac_pdata *pdata);
+ int (*config_rx_flow_control)(struct xlgmac_pdata *pdata);
+
+ /* For Vlan related config */
+ int (*enable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+ int (*enable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+ int (*update_vlan_hash_table)(struct xlgmac_pdata *pdata);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xlgmac_pdata *pdata);
+ int (*config_tx_coalesce)(struct xlgmac_pdata *pdata);
+ unsigned int (*usec_to_riwt)(struct xlgmac_pdata *pdata,
+ unsigned int usec);
+ unsigned int (*riwt_to_usec)(struct xlgmac_pdata *pdata,
+ unsigned int riwt);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+ int (*config_tx_threshold)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+ int (*config_tsf_mode)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xlgmac_pdata *pdata);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*get_rx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*config_tx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*get_tx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*config_pblx8)(struct xlgmac_pdata *pdata);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xlgmac_pdata *pdata);
+ void (*tx_mmc_int)(struct xlgmac_pdata *pdata);
+ void (*read_mmc_stats)(struct xlgmac_pdata *pdata);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xlgmac_pdata *pdata);
+ int (*disable_rss)(struct xlgmac_pdata *pdata);
+ int (*set_rss_hash_key)(struct xlgmac_pdata *pdata,
+ const u8 *key);
+ int (*set_rss_lookup_table)(struct xlgmac_pdata *pdata,
+ const u32 *table);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xlgmac_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int phyifsel; /* PHY interface support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xlgmac_resources {
+ void __iomem *addr;
+ int irq;
+};
+
+struct xlgmac_pdata {
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct xlgmac_hw_ops hw_ops;
+ struct xlgmac_desc_ops desc_ops;
+
+ /* Device statistics */
+ struct xlgmac_stats stats;
+
+ u32 msg_enable;
+
+ /* MAC registers base */
+ void __iomem *mac_regs;
+
+ /* Hardware features of the device */
+ struct xlgmac_hw_features hw_feat;
+
+ struct work_struct restart_work;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xlgmac_channel *channel_head;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_desc_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_usecs;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* Device interrupt number */
+ int dev_irq;
+ unsigned int per_channel_irq;
+ int channel_irq[XLGMAC_MAX_DMA_CHANNELS];
+
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XLGMAC_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XLGMAC_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
+ int phy_speed;
+
+ char drv_name[32];
+ char drv_ver[32];
+};
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops);
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops);
+const struct net_device_ops *xlgmac_get_netdev_ops(void);
+const struct ethtool_ops *xlgmac_get_ethtool_ops(void);
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx,
+ unsigned int count,
+ unsigned int flag);
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx);
+void xlgmac_print_pkt(struct net_device *netdev,
+ struct sk_buff *skb, bool tx_rx);
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata);
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata);
+int xlgmac_drv_probe(struct device *dev,
+ struct xlgmac_resources *res);
+int xlgmac_drv_remove(struct device *dev);
+
+/* For debug prints */
+#ifdef XLGMAC_DEBUG
+#define XLGMAC_PR(fmt, args...) \
+ pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args)
+#else
+#define XLGMAC_PR(x...) do { } while (0)
+#endif
+
+#endif /* __DWC_XLGMAC_H__ */
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index f864fd0663db..711fbbbc4b1f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2124,33 +2124,26 @@ static const char
};
/*
- * bdx_get_settings - get device-specific settings
+ * bdx_get_link_ksettings - get device-specific settings
* @netdev
* @ecmd
*/
-static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- u32 rdintcm;
- u32 tdintcm;
- struct bdx_priv *priv = netdev_priv(netdev);
-
- rdintcm = priv->rdintcm;
- tdintcm = priv->tdintcm;
-
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- ecmd->duplex = DUPLEX_FULL;
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
- ecmd->autoneg = AUTONEG_DISABLE;
-
- /* PCK_TH measures in multiples of FIFO bytes
- We translate to packets */
- ecmd->maxtxpkt =
- ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
- ecmd->maxrxpkt =
- ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
+static int bdx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
+
+ ecmd->base.speed = SPEED_10000;
+ ecmd->base.duplex = DUPLEX_FULL;
+ ecmd->base.port = PORT_FIBRE;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -2384,7 +2377,6 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
static void bdx_set_ethtool_ops(struct net_device *netdev)
{
static const struct ethtool_ops bdx_ethtool_ops = {
- .get_settings = bdx_get_settings,
.get_drvinfo = bdx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = bdx_get_coalesce,
@@ -2394,6 +2386,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
.get_strings = bdx_get_strings,
.get_sset_count = bdx_get_sset_count,
.get_ethtool_stats = bdx_get_ethtool_stats,
+ .get_link_ksettings = bdx_get_link_ksettings,
};
netdev->ethtool_ops = &bdx_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 7c7ae0890e90..729a7da90b5b 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1134,7 +1134,6 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
u32 buf_len = skb_frag_size(frag);
dma_addr_t desc_dma;
u32 desc_dma_32;
- u32 pkt_info;
dma_addr = dma_map_page(dev, page, page_offset, buf_len,
DMA_TO_DEVICE);
@@ -1151,9 +1150,6 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
}
desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
- pkt_info =
- (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
- KNAV_DMA_DESC_RETQ_SHIFT;
set_pkt_info(dma_addr, buf_len, 0, ndesc);
desc_dma_32 = (u32)desc_dma;
set_words(&desc_dma_32, 1, &pdesc->next_desc);
@@ -1882,6 +1878,7 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
+ u8 num_tc;
int i;
/* setup tc must be called under rtnl lock */
@@ -1890,15 +1887,18 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = tc->mqprio->num_tc;
+
/* Sanity-check the number of traffic classes requested */
if ((dev->real_num_tx_queues <= 1) ||
- (dev->real_num_tx_queues < tc->tc))
+ (dev->real_num_tx_queues < num_tc))
return -EINVAL;
/* Configure traffic class to queue mappings */
- if (tc->tc) {
- netdev_set_num_tc(dev, tc->tc);
- for (i = 0; i < tc->tc; i++)
+ if (num_tc) {
+ netdev_set_num_tc(dev, num_tc);
+ for (i = 0; i < num_tc; i++)
netdev_set_tc_queue(dev, i, 1, i);
} else {
netdev_reset_tc(dev);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index eece3e2eec14..897176fc5043 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3048,8 +3048,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
for_each_child_of_node(node, port) {
slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
if (!slave) {
- dev_err(dev,
- "memomry alloc failed for secondary port(%s), skipping...\n",
+ dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
port->name);
continue;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 72013314bba8..fa6a06571187 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1206,61 +1206,68 @@ void gelic_net_get_drvinfo(struct net_device *netdev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int gelic_ether_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int gelic_ether_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct gelic_card *card = netdev_card(netdev);
+ u32 supported, advertising;
gelic_card_get_ether_port_status(card, 0);
if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
case GELIC_LV1_ETHER_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
case GELIC_LV1_ETHER_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case GELIC_LV1_ETHER_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
default:
pr_info("%s: speed unknown\n", __func__);
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
}
- cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
+ supported = SUPPORTED_TP | SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full;
- cmd->advertising = cmd->supported;
+ advertising = supported;
if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->advertising &= ~ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ advertising &= ~ADVERTISED_Autoneg;
}
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int gelic_ether_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int
+gelic_ether_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct gelic_card *card = netdev_card(netdev);
u64 mode;
int ret;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
mode = GELIC_LV1_ETHER_AUTO_NEG;
} else {
- switch (cmd->speed) {
+ switch (cmd->base.speed) {
case SPEED_10:
mode = GELIC_LV1_ETHER_SPEED_10;
break;
@@ -1273,9 +1280,9 @@ static int gelic_ether_set_settings(struct net_device *netdev,
default:
return -EINVAL;
}
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL) {
mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
- else if (cmd->speed == SPEED_1000) {
+ } else if (cmd->base.speed == SPEED_1000) {
pr_info("1000 half duplex is not supported.\n");
return -EINVAL;
}
@@ -1370,11 +1377,11 @@ done:
static const struct ethtool_ops gelic_ether_ethtool_ops = {
.get_drvinfo = gelic_net_get_drvinfo,
- .get_settings = gelic_ether_get_settings,
- .set_settings = gelic_ether_set_settings,
.get_link = ethtool_op_get_link,
.get_wol = gelic_net_get_wol,
.set_wol = gelic_net_set_wol,
+ .get_link_ksettings = gelic_ether_get_link_ksettings,
+ .set_link_ksettings = gelic_ether_set_link_ksettings,
};
/**
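Note: gelic shows the common conversion idiom for drivers that still compute legacy u32 SUPPORTED_*/ADVERTISED_* masks: keep the masks in locals, fill cmd->base directly, and translate the masks into the new bitmaps at the end. A minimal sketch; example_get_link_ksettings and the fixed 1000/full report are illustrative, not from any one driver.

static int example_get_link_ksettings(struct net_device *ndev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_TP | SUPPORTED_Autoneg |
			SUPPORTED_1000baseT_Full;
	u32 advertising = supported;

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.port = PORT_TP;

	/* legacy u32 masks -> DECLARE_BITMAP-based link-mode masks */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}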
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index ffe519382e11..16bd036d0682 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -47,19 +47,23 @@ static struct {
};
static int
-spider_net_ethtool_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct spider_net_card *card;
card = netdev_priv(netdev);
- cmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- cmd->port = PORT_FIBRE;
- ethtool_cmd_speed_set(cmd, card->phy.speed);
- cmd->duplex = DUPLEX_FULL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.speed = card->phy.speed;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -166,7 +170,6 @@ static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
}
const struct ethtool_ops spider_net_ethtool_ops = {
- .get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo,
.get_wol = spider_net_ethtool_get_wol,
.get_msglevel = spider_net_ethtool_get_msglevel,
@@ -177,5 +180,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
.get_strings = spider_net_get_strings,
.get_sset_count = spider_net_get_sset_count,
.get_ethtool_stats = spider_net_get_ethtool_stats,
+ .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
};
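Note: spider_net takes the other route, setting the bitmap bits directly instead of converting legacy u32 masks. The helpers are thin bitmap wrappers, roughly (paraphrased from include/linux/ethtool.h of this cycle):

#define ethtool_link_ksettings_zero_link_mode(ptr, name)		\
	bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS)

#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)		\
	__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT,			\
		  (ptr)->link_modes.name)

The direct helpers are the natural choice when the supported set is small and known statically; the u32 conversion (as in gelic above) suits drivers that already compute legacy masks.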
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c5583991da4a..5ac6eaa9e785 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1499,27 +1499,29 @@ static void tsi108_init_mac(struct net_device *dev)
TSI_WRITE(TSI108_EC_INTMASK, ~0);
}
-static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_gset(&data->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
-static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_sset(&data->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
@@ -1535,8 +1537,8 @@ static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static const struct ethtool_ops tsi108_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = tsi108_get_settings,
- .set_settings = tsi108_set_settings,
+ .get_link_ksettings = tsi108_get_link_ksettings,
+ .set_link_ksettings = tsi108_set_link_ksettings,
};
static const struct net_device_ops tsi108_netdev_ops = {
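Note: tsi108, like via-rhine below, delegates to the MII library, which gained ksettings variants; the conversion is mechanical, and the existing locking (a spinlock here, a mutex in via-rhine) is kept as-is. A minimal sketch, assuming a hypothetical priv struct with an embedded struct mii_if_info:

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&priv->lock, flags);
	rc = mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;
}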
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c068c58428f7..4cf41f779d0e 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2303,25 +2303,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_gset(&rp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
mutex_unlock(&rp->task_lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_sset(&rp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
rhine_set_carrier(&rp->mii_if);
mutex_unlock(&rp->task_lock);
@@ -2391,14 +2393,14 @@ static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
.get_wol = rhine_get_wol,
.set_wol = rhine_set_wol,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d088788b27a7..ef9538ee53d0 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3291,15 +3291,17 @@ static void velocity_ethtool_down(struct net_device *dev)
velocity_set_power_state(vptr, PCI_D3hot);
}
-static int velocity_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int velocity_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem *regs = vptr->mac_regs;
u32 status;
+ u32 supported, advertising;
+
status = check_connection_type(vptr->mac_regs);
- cmd->supported = SUPPORTED_TP |
+ supported = SUPPORTED_TP |
SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@@ -3308,9 +3310,9 @@ static int velocity_get_settings(struct net_device *dev,
SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+ advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
- cmd->advertising |=
+ advertising |=
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -3320,19 +3322,19 @@ static int velocity_get_settings(struct net_device *dev,
} else {
switch (vptr->options.spd_dpx) {
case SPD_DPX_1000_FULL:
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
break;
case SPD_DPX_100_HALF:
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
break;
case SPD_DPX_100_FULL:
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
break;
case SPD_DPX_10_HALF:
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
break;
case SPD_DPX_10_FULL:
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
break;
default:
break;
@@ -3340,30 +3342,35 @@ static int velocity_get_settings(struct net_device *dev,
}
if (status & VELOCITY_SPEED_1000)
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
else if (status & VELOCITY_SPEED_100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
- cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- cmd->port = PORT_TP;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
+ cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
if (status & VELOCITY_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int velocity_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int velocity_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
u32 curr_status;
u32 new_status = 0;
int ret = 0;
@@ -3371,11 +3378,12 @@ static int velocity_set_settings(struct net_device *dev,
curr_status = check_connection_type(vptr->mac_regs);
curr_status &= (~VELOCITY_LINK_FAIL);
- new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+ new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
- new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
+ new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
+ VELOCITY_DUPLEX_FULL : 0);
if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
(new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
@@ -3644,8 +3652,6 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops velocity_ethtool_ops = {
- .get_settings = velocity_get_settings,
- .set_settings = velocity_set_settings,
.get_drvinfo = velocity_get_drvinfo,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
@@ -3658,7 +3664,9 @@ static const struct ethtool_ops velocity_ethtool_ops = {
.get_coalesce = velocity_get_coalesce,
.set_coalesce = velocity_set_coalesce,
.begin = velocity_ethtool_up,
- .complete = velocity_ethtool_down
+ .complete = velocity_ethtool_down,
+ .get_link_ksettings = velocity_get_link_ksettings,
+ .set_link_ksettings = velocity_set_link_ksettings,
};
#if defined(CONFIG_PM) && defined(CONFIG_INET)
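Note: velocity exercises the set side as well: the new callback receives a const ethtool_link_ksettings, and since ethtool_cmd_speed() has no ksettings counterpart, the requested parameters are read straight from cmd->base. A condensed sketch of the validation pattern, with example_apply_link as a hypothetical stand-in for the driver's hardware-programming step:

static int example_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = cmd->base.speed;

	if (cmd->base.autoneg != AUTONEG_ENABLE) {
		if (speed != SPEED_10 && speed != SPEED_100 &&
		    speed != SPEED_1000)
			return -EINVAL;
		/* e.g. gelic above rejects forced 1000 half duplex */
		if (speed == SPEED_1000 && cmd->base.duplex != DUPLEX_FULL)
			return -EINVAL;
	}

	return example_apply_link(dev, cmd->base.autoneg, speed,
				  cmd->base.duplex);	/* hypothetical */
}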
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index f90267f0519f..2bdfb39215e9 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1152,7 +1152,8 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
+ priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ netdev_name(ndev));
if (!priv->xfer_wq) {
err = -ENOMEM;
goto err_wq;
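Note: the w5100 hunk is a format-string fix, not a rename. alloc_workqueue()'s first parameter is a printf-style format, so a runtime-supplied interface name must be passed as a format argument:

	/* safe: the name is formatted via "%s" */
	wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, netdev_name(ndev));

	/* unsafe: a '%' in the interface name would be parsed as a
	 * conversion specifier */
	wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);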
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b96e96919e31..33c595f4691d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -301,7 +301,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,