-rw-r--r--Documentation/DocBook/80211.tmpl1
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-systemport.txt29
-rw-r--r--Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt23
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz9021.txt49
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz90x1.txt83
-rw-r--r--Documentation/devicetree/bindings/net/via-rhine.txt17
-rw-r--r--Documentation/driver-model/devres.txt5
-rw-r--r--Documentation/networking/bonding.txt44
-rw-r--r--Documentation/networking/filter.txt171
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/vt8500.dtsi6
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi6
-rw-r--r--arch/arm/boot/dts/wm8850.dtsi6
-rw-r--r--arch/sparc/include/asm/checksum_32.h12
-rw-r--r--arch/sparc/include/asm/checksum_64.h12
-rw-r--r--arch/x86/include/asm/checksum_64.h9
-rw-r--r--drivers/bluetooth/btmrvl_drv.h4
-rw-r--r--drivers/bluetooth/btmrvl_main.c19
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c103
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h3
-rw-r--r--drivers/bluetooth/hci_h4.c7
-rw-r--r--drivers/clk/ti/clk-43xx.c16
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c111
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_alb.c116
-rw-r--r--drivers/net/bonding/bond_alb.h1
-rw-r--r--drivers/net/bonding/bond_main.c24
-rw-r--r--drivers/net/bonding/bond_options.c56
-rw-r--r--drivers/net/bonding/bond_options.h2
-rw-r--r--drivers/net/bonding/bond_sysfs.c553
-rw-r--r--drivers/net/bonding/bonding.h26
-rw-r--r--drivers/net/can/Kconfig8
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/c_can/c_can_pci.c51
-rw-r--r--drivers/net/can/softing/softing_main.c20
-rw-r--r--drivers/net/can/spi/Kconfig10
-rw-r--r--drivers/net/can/spi/Makefile8
-rw-r--r--drivers/net/can/spi/mcp251x.c (renamed from drivers/net/can/mcp251x.c)93
-rw-r--r--drivers/net/can/usb/Kconfig4
-rw-r--r--drivers/net/can/usb/kvaser_usb.c53
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c2
-rw-r--r--drivers/net/dsa/mv88e6131.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx.c12
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c15
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c33
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1614
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h677
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c370
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c193
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c251
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h41
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c278
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c111
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c22
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h41
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c316
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c93
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h96
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h96
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c48
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c56
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h49
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c52
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h54
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h49
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c112
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c47
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c161
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c319
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c49
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c246
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c44
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c32
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c56
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c188
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c119
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c7
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw.c56
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c48
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c506
-rw-r--r--drivers/net/hyperv/hyperv_net.h159
-rw-r--r--drivers/net/hyperv/netvsc.c529
-rw-r--r--drivers/net/hyperv/netvsc_drv.c111
-rw-r--r--drivers/net/hyperv/rndis_filter.c193
-rw-r--r--drivers/net/ieee802154/at86rf230.c133
-rw-r--r--drivers/net/irda/w83977af_ir.c33
-rw-r--r--drivers/net/macvlan.c119
-rw-r--r--drivers/net/phy/at803x.c39
-rw-r--r--drivers/net/phy/mdio_bus.c67
-rw-r--r--drivers/net/phy/micrel.c106
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vxlan.c63
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c356
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c85
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c109
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c513
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c766
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c219
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c183
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h98
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig30
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c31
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c19
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c109
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h50
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c97
-rw-r--r--drivers/net/wireless/b43/phy_common.c6
-rw-r--r--drivers/net/wireless/b43/phy_g.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c304
-rw-r--r--drivers/net/wireless/b43/radio_2056.c418
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c72
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h3
-rw-r--r--drivers/net/wireless/b43/wa.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/cw1200/sta.c3
-rw-r--r--drivers/net/wireless/cw1200/sta.h3
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c3
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c94
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h46
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h54
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c49
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c56
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c272
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c330
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c177
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c71
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c46
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c84
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c47
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c62
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c21
-rw-r--r--drivers/net/wireless/mwifiex/README7
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c25
-rw-r--r--drivers/net/wireless/mwifiex/decl.h8
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h2
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c3
-rw-r--r--drivers/net/wireless/mwifiex/scan.c37
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c9
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h18
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/mwifiex/usb.c55
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c9
-rw-r--r--drivers/net/wireless/p54/main.c3
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h6
-rw-r--r--drivers/net/wireless/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c28
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c23
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h6
-rw-r--r--drivers/s390/net/qeth_core_main.c77
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c11
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/staging/rtl8821ae/core.c14
-rw-r--r--include/linux/ethtool.h13
-rw-r--r--include/linux/filter.h99
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/phy.h8
-rw-r--r--include/linux/skbuff.h93
-rw-r--r--include/linux/spi/at86rf230.h14
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/net/6lowpan.h1
-rw-r--r--include/net/addrconf.h5
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/bluetooth/hci_core.h13
-rw-r--r--include/net/cfg80211.h86
-rw-r--r--include/net/checksum.h2
-rw-r--r--include/net/dsa.h5
-rw-r--r--include/net/inet_ecn.h2
-rw-r--r--include/net/ip.h24
-rw-r--r--include/net/ip6_checksum.h7
-rw-r--r--include/net/ipv6.h19
-rw-r--r--include/net/mac80211.h31
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/regulatory.h6
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/snmp.h32
-rw-r--r--include/net/tcp.h25
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/uapi/linux/audit.h8
-rw-r--r--include/uapi/linux/capability.h7
-rw-r--r--include/uapi/linux/ethtool.h32
-rw-r--r--include/uapi/linux/filter.h3
-rw-r--r--include/uapi/linux/if_fddi.h90
-rw-r--r--include/uapi/linux/nl80211.h42
-rw-r--r--include/uapi/linux/tipc.h23
-rw-r--r--include/uapi/linux/tipc_config.h10
-rw-r--r--kernel/audit.c64
-rw-r--r--lib/Kconfig.debug13
-rw-r--r--lib/Makefile1
-rw-r--r--lib/test_bpf.c1546
-rw-r--r--net/8021q/vlan_dev.c58
-rw-r--r--net/bluetooth/hci_conn.c81
-rw-r--r--net/bluetooth/hci_core.c20
-rw-r--r--net/bluetooth/hci_event.c246
-rw-r--r--net/bluetooth/hci_sock.c11
-rw-r--r--net/bluetooth/lib.c1
-rw-r--r--net/bluetooth/mgmt.c36
-rw-r--r--net/core/dev.c50
-rw-r--r--net/core/ethtool.c223
-rw-r--r--net/core/filter.c600
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/dccp/proto.c9
-rw-r--r--net/ieee802154/reassembly.c48
-rw-r--r--net/ipv4/af_inet.c93
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/gre_demux.c24
-rw-r--r--net/ipv4/icmp.c12
-rw-r--r--net/ipv4/igmp.c12
-rw-r--r--net/ipv4/inetpeer.c2
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/ip_options.c6
-rw-r--r--net/ipv4/proc.c24
-rw-r--r--net/ipv4/tcp.c8
-rw-r--r--net/ipv4/tcp_bic.c5
-rw-r--r--net/ipv4/tcp_cong.c24
-rw-r--r--net/ipv4/tcp_cubic.c5
-rw-r--r--net/ipv4/tcp_highspeed.c4
-rw-r--r--net/ipv4/tcp_htcp.c4
-rw-r--r--net/ipv4/tcp_hybla.c7
-rw-r--r--net/ipv4/tcp_illinois.c5
-rw-r--r--net/ipv4/tcp_input.c31
-rw-r--r--net/ipv4/tcp_ipv4.c25
-rw-r--r--net/ipv4/tcp_lp.c5
-rw-r--r--net/ipv4/tcp_output.c77
-rw-r--r--net/ipv4/tcp_scalable.c5
-rw-r--r--net/ipv4/tcp_vegas.c7
-rw-r--r--net/ipv4/tcp_veno.c9
-rw-r--r--net/ipv4/tcp_yeah.c5
-rw-r--r--net/ipv4/udp.c23
-rw-r--r--net/ipv6/addrconf.c49
-rw-r--r--net/ipv6/addrconf_core.c2
-rw-r--r--net/ipv6/af_inet6.c42
-rw-r--r--net/ipv6/icmp.c35
-rw-r--r--net/ipv6/ip6_checksum.c27
-rw-r--r--net/ipv6/ip6_fib.c12
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c64
-rw-r--r--net/ipv6/ping.c7
-rw-r--r--net/ipv6/proc.c6
-rw-r--r--net/ipv6/raw.c10
-rw-r--r--net/ipv6/tcp_ipv6.c21
-rw-r--r--net/ipv6/udp.c48
-rw-r--r--net/l2tp/l2tp_core.c57
-rw-r--r--net/l2tp/l2tp_ip6.c10
-rw-r--r--net/mac80211/aes_ccm.c37
-rw-r--r--net/mac80211/cfg.c53
-rw-r--r--net/mac80211/chan.c614
-rw-r--r--net/mac80211/debugfs.c2
-rw-r--r--net/mac80211/debugfs.h2
-rw-r--r--net/mac80211/debugfs_netdev.h2
-rw-r--r--net/mac80211/driver-ops.h8
-rw-r--r--net/mac80211/ht.c22
-rw-r--r--net/mac80211/ibss.c75
-rw-r--r--net/mac80211/ieee80211_i.h33
-rw-r--r--net/mac80211/iface.c36
-rw-r--r--net/mac80211/main.c8
-rw-r--r--net/mac80211/mesh.c36
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/michael.h1
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/mac80211/rx.c19
-rw-r--r--net/mac80211/scan.c10
-rw-r--r--net/mac80211/sta_info.c3
-rw-r--r--net/mac80211/util.c145
-rw-r--r--net/mac80211/wpa.c5
-rw-r--r--net/netfilter/nfnetlink.c10
-rw-r--r--net/netlink/af_netlink.c70
-rw-r--r--net/netlink/af_netlink.h6
-rw-r--r--net/openvswitch/vport-vxlan.c3
-rw-r--r--net/rds/iw_sysctl.c3
-rw-r--r--net/rds/sysctl.c3
-rw-r--r--net/sched/cls_api.c26
-rw-r--r--net/sched/cls_basic.c10
-rw-r--r--net/sched/cls_bpf.c10
-rw-r--r--net/sched/cls_cgroup.c4
-rw-r--r--net/sched/cls_flow.c4
-rw-r--r--net/sched/cls_fw.c10
-rw-r--r--net/sched/cls_route.c11
-rw-r--r--net/sched/cls_rsvp.h4
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_api.c8
-rw-r--r--net/sctp/protocol.c9
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/sctp/sysctl.c21
-rw-r--r--net/sctp/ulpqueue.c4
-rw-r--r--net/tipc/Makefile2
-rw-r--r--net/tipc/bcast.c182
-rw-r--r--net/tipc/bcast.h9
-rw-r--r--net/tipc/bearer.c117
-rw-r--r--net/tipc/bearer.h12
-rw-r--r--net/tipc/config.c12
-rw-r--r--net/tipc/core.c7
-rw-r--r--net/tipc/core.h8
-rw-r--r--net/tipc/discover.c66
-rw-r--r--net/tipc/discover.h1
-rw-r--r--net/tipc/handler.c134
-rw-r--r--net/tipc/link.c141
-rw-r--r--net/tipc/link.h7
-rw-r--r--net/tipc/name_distr.c78
-rw-r--r--net/tipc/name_distr.h35
-rw-r--r--net/tipc/name_table.c14
-rw-r--r--net/tipc/net.c68
-rw-r--r--net/tipc/net.h4
-rw-r--r--net/tipc/node.c103
-rw-r--r--net/tipc/node.h90
-rw-r--r--net/tipc/node_subscr.c9
-rw-r--r--net/tipc/node_subscr.h2
-rw-r--r--net/tipc/socket.c29
-rw-r--r--net/wireless/Kconfig37
-rw-r--r--net/wireless/chan.c176
-rw-r--r--net/wireless/core.c75
-rw-r--r--net/wireless/core.h46
-rw-r--r--net/wireless/ethtool.c10
-rw-r--r--net/wireless/ibss.c41
-rw-r--r--net/wireless/mesh.c28
-rw-r--r--net/wireless/mlme.c38
-rw-r--r--net/wireless/nl80211.c499
-rw-r--r--net/wireless/rdev-ops.h13
-rw-r--r--net/wireless/reg.c153
-rw-r--r--net/wireless/reg.h18
-rw-r--r--net/wireless/scan.c154
-rw-r--r--net/wireless/sme.c40
-rw-r--r--net/wireless/trace.h29
-rw-r--r--net/wireless/util.c171
-rw-r--r--net/wireless/wext-compat.c40
-rw-r--r--net/wireless/wext-compat.h2
-rw-r--r--net/wireless/wext-sme.c12
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--net/xfrm/xfrm_proc.c3
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--tools/net/bpf_exp.l1
-rw-r--r--tools/net/bpf_exp.y11
-rw-r--r--tools/testing/selftests/net/Makefile8
535 files changed, 17416 insertions, 8943 deletions
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 044b76436e83..d9b9416c989f 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -100,6 +100,7 @@
!Finclude/net/cfg80211.h wdev_priv
!Finclude/net/cfg80211.h ieee80211_iface_limit
!Finclude/net/cfg80211.h ieee80211_iface_combination
+!Finclude/net/cfg80211.h cfg80211_check_combinations
</chapter>
<chapter>
<title>Actions and configuration</title>
diff --git a/Documentation/devicetree/bindings/net/broadcom-systemport.txt b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
new file mode 100644
index 000000000000..1b7600e022dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
@@ -0,0 +1,29 @@
+* Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
+
+Required properties:
+- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
+- reg: address and length of the register set for the device.
+- interrupts: interrupts for the device, first cell must be for the rx
+ interrupts, and the second cell should be for the transmit queues
+- local-mac-address: Ethernet MAC address (48 bits) of this adapter
+- phy-mode: Should be a string describing the PHY interface to the
+ Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
+- fixed-link: see Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for
+ the property specific details
+
+Optional properties:
+- systemport,num-tier2-arb: number of tier 2 arbiters, an integer
+- systemport,num-tier1-arb: number of tier 1 arbiters, an integer
+- systemport,num-txq: number of HW transmit queues, an integer
+- systemport,num-rxq: number of HW receive queues, an integer
+
+Example:
+ethernet@f04a0000 {
+ compatible = "brcm,systemport-v1.00";
+ reg = <0xf04a0000 0x4650>;
+ local-mac-address = [ 00 11 22 33 44 55 ];
+ fixed-link = <0 1 1000 0 0>;
+ phy-mode = "gmii";
+ interrupts = <0x0 0x16 0x0>,
+ <0x0 0x17 0x0>;
+};
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
new file mode 100644
index 000000000000..d3bbdded4cbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
@@ -0,0 +1,23 @@
+* AT86RF230 IEEE 802.15.4 *
+
+Required properties:
+ - compatible: should be "atmel,at86rf230", "atmel,at86rf231",
+ "atmel,at86rf233" or "atmel,at86rf212"
+ - spi-max-frequency: maximal bus speed, should be set to 7500000 depending
+ on sync or async operation mode
+ - reg: the chipselect index
+ - interrupts: the interrupt generated by the device
+
+Optional properties:
+ - reset-gpio: GPIO spec for the rstn pin
+ - sleep-gpio: GPIO spec for the slp_tr pin
+
+Example:
+
+ at86rf231@0 {
+ compatible = "atmel,at86rf231";
+ spi-max-frequency = <7500000>;
+ reg = <0>;
+ interrupts = <19 1>;
+ interrupt-parent = <&gpio3>;
+ };
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
deleted file mode 100644
index 997a63f1aea1..000000000000
--- a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-Micrel KSZ9021 Gigabit Ethernet PHY
-
-Some boards require special tuning values, particularly when it comes to
-clock delays. You can specify clock delay values by adding
-micrel-specific properties to an Ethernet OF device node.
-
-All skew control options are specified in picoseconds. The minimum
-value is 0, and the maximum value is 3000.
-
-Optional properties:
- - rxc-skew-ps : Skew control of RXC pad
- - rxdv-skew-ps : Skew control of RX CTL pad
- - txc-skew-ps : Skew control of TXC pad
- - txen-skew-ps : Skew control of TX_CTL pad
- - rxd0-skew-ps : Skew control of RX data 0 pad
- - rxd1-skew-ps : Skew control of RX data 1 pad
- - rxd2-skew-ps : Skew control of RX data 2 pad
- - rxd3-skew-ps : Skew control of RX data 3 pad
- - txd0-skew-ps : Skew control of TX data 0 pad
- - txd1-skew-ps : Skew control of TX data 1 pad
- - txd2-skew-ps : Skew control of TX data 2 pad
- - txd3-skew-ps : Skew control of TX data 3 pad
-
-Examples:
-
- /* Attach to an Ethernet device with autodetected PHY */
- &enet {
- rxc-skew-ps = <3000>;
- rxdv-skew-ps = <0>;
- txc-skew-ps = <3000>;
- txen-skew-ps = <0>;
- status = "okay";
- };
-
- /* Attach to an explicitly-specified PHY */
- mdio {
- phy0: ethernet-phy@0 {
- rxc-skew-ps = <3000>;
- rxdv-skew-ps = <0>;
- txc-skew-ps = <3000>;
- txen-skew-ps = <0>;
- reg = <0>;
- };
- };
- ethernet@70000 {
- status = "okay";
- phy = <&phy0>;
- phy-mode = "rgmii-id";
- };
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
new file mode 100644
index 000000000000..692076fda0e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -0,0 +1,83 @@
+Micrel KSZ9021/KSZ9031 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays. You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+Note that these settings are applied after any phy-specific fixup from
+phy_fixup_list (see phy_init_hw() from drivers/net/phy/phy_device.c),
+and therefore may overwrite them.
+
+KSZ9021:
+
+ All skew control options are specified in picoseconds. The minimum
+ value is 0, the maximum value is 3000, and it is incremented by 200ps
+ steps.
+
+ Optional properties:
+
+ - rxc-skew-ps : Skew control of RXC pad
+ - rxdv-skew-ps : Skew control of RX CTL pad
+ - txc-skew-ps : Skew control of TXC pad
+ - txen-skew-ps : Skew control of TX CTL pad
+ - rxd0-skew-ps : Skew control of RX data 0 pad
+ - rxd1-skew-ps : Skew control of RX data 1 pad
+ - rxd2-skew-ps : Skew control of RX data 2 pad
+ - rxd3-skew-ps : Skew control of RX data 3 pad
+ - txd0-skew-ps : Skew control of TX data 0 pad
+ - txd1-skew-ps : Skew control of TX data 1 pad
+ - txd2-skew-ps : Skew control of TX data 2 pad
+ - txd3-skew-ps : Skew control of TX data 3 pad
+
+KSZ9031:
+
+ All skew control options are specified in picoseconds. The minimum
+ value is 0, and the maximum is property-dependent. The increment
+ step is 60ps.
+
+ Optional properties:
+
+ Maximum value of 1860:
+
+ - rxc-skew-ps : Skew control of RX clock pad
+ - txc-skew-ps : Skew control of TX clock pad
+
+ Maximum value of 900:
+
+ - rxdv-skew-ps : Skew control of RX CTL pad
+ - txen-skew-ps : Skew control of TX CTL pad
+ - rxd0-skew-ps : Skew control of RX data 0 pad
+ - rxd1-skew-ps : Skew control of RX data 1 pad
+ - rxd2-skew-ps : Skew control of RX data 2 pad
+ - rxd3-skew-ps : Skew control of RX data 3 pad
+ - txd0-skew-ps : Skew control of TX data 0 pad
+ - txd1-skew-ps : Skew control of TX data 1 pad
+ - txd2-skew-ps : Skew control of TX data 2 pad
+ - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+ /* Attach to an Ethernet device with autodetected PHY */
+ &enet {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ status = "okay";
+ };
+
+ /* Attach to an explicitly-specified PHY */
+ mdio {
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ reg = <0>;
+ };
+ };
+ ethernet@70000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
diff --git a/Documentation/devicetree/bindings/net/via-rhine.txt b/Documentation/devicetree/bindings/net/via-rhine.txt
new file mode 100644
index 000000000000..334eca2bf937
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/via-rhine.txt
@@ -0,0 +1,17 @@
+* VIA Rhine 10/100 Network Controller
+
+Required properties:
+- compatible : Should be "via,vt8500-rhine" for integrated
+ Rhine controllers found in VIA VT8500, WonderMedia WM8950
+ and similar. These are listed as 1106:3106 rev. 0x84 on the
+ virtual PCI bus under vendor-provided kernels.
+- reg : Address and length of the io space
+- interrupts : Should contain the controller interrupt line
+
+Examples:
+
+ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 4f7897e99cba..c74e04494ade 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -308,3 +308,8 @@ SLAVE DMA ENGINE
SPI
devm_spi_register_master()
+
+MDIO
+ devm_mdiobus_alloc()
+ devm_mdiobus_alloc_size()
+ devm_mdiobus_free()
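To show how the managed variants listed above are typically used, here is a
minimal, hypothetical probe sketch; devm_mdiobus_alloc() and mdiobus_register()
are the real helpers, while the foo_* names and the platform-driver framing are
invented for illustration only.

  /* Hypothetical example (not from this patch): with devm_mdiobus_alloc()
   * the bus memory is released automatically when the device is unbound,
   * so error paths and the remove callback need no mdiobus_free().
   */
  #include <linux/phy.h>
  #include <linux/platform_device.h>

  static int foo_mdio_probe(struct platform_device *pdev)
  {
          struct mii_bus *bus;

          bus = devm_mdiobus_alloc(&pdev->dev);   /* managed allocation */
          if (!bus)
                  return -ENOMEM;

          bus->name = "foo-mdio";
          bus->parent = &pdev->dev;
          bus->read = foo_mdio_read;              /* invented accessors */
          bus->write = foo_mdio_write;
          snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

          /* registration stays explicit; only the allocation is managed */
          return mdiobus_register(bus);
  }

devm_mdiobus_free(), also listed above, covers the unusual case where a driver
must drop such a bus before the device is unbound.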
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index a383c00392d0..9c723ecd0025 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -585,13 +585,19 @@ mode
balance-tlb or 5
Adaptive transmit load balancing: channel bonding that
- does not require any special switch support. The
- outgoing traffic is distributed according to the
- current load (computed relative to the speed) on each
- slave. Incoming traffic is received by the current
- slave. If the receiving slave fails, another slave
- takes over the MAC address of the failed receiving
- slave.
+ does not require any special switch support.
+
+ In tlb_dynamic_lb=1 mode, the outgoing traffic is
+ distributed according to the current load (computed
+ relative to the speed) on each slave.
+
+ In tlb_dynamic_lb=0 mode, the load balancing based on
+ current load is disabled and the load is distributed
+ only using the hash distribution.
+
+ Incoming traffic is received by the current slave.
+ If the receiving slave fails, another slave takes over
+ the MAC address of the failed receiving slave.
Prerequisite:
@@ -736,6 +742,28 @@ primary_reselect
This option was added for bonding version 3.6.0.
+tlb_dynamic_lb
+
+ Specifies if dynamic shuffling of flows is enabled in tlb
+ mode. The value has no effect on any other modes.
+
+ The default behavior of tlb mode is to shuffle active flows across
+ slaves based on the load in that interval. This gives nice lb
+ characteristics but can cause packet reordering. If re-ordering is
+ a concern use this variable to disable flow shuffling and rely on
+ load balancing provided solely by the hash distribution.
+ xmit-hash-policy can be used to select the appropriate hashing for
+ the setup.
+
+ The sysfs entry can be used to change the setting per bond device
+ and the initial value is derived from the module parameter. The
+ sysfs entry is allowed to be changed only if the bond device is
+ down.
+
+ The default value is "1", which enables flow shuffling, while value "0"
+ disables it. This option was added in bonding driver 3.7.1.
+
+
updelay
Specifies the time, in milliseconds, to wait before enabling a
@@ -769,7 +797,7 @@ use_carrier
xmit_hash_policy
Selects the transmit hash policy to use for slave selection in
- balance-xor and 802.3ad modes. Possible values are:
+ balance-xor, 802.3ad, and tlb modes. Possible values are:
layer2
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 81f940f4e884..748fd385535d 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -281,6 +281,7 @@ Possible BPF extensions are shown in the following table:
cpu raw_smp_processor_id()
vlan_tci vlan_tx_tag_get(skb)
vlan_pr vlan_tx_tag_present(skb)
+ rand prandom_u32()
These extensions can also be prefixed with '#'.
Examples for low-level BPF:
@@ -308,6 +309,18 @@ Examples for low-level BPF:
ret #-1
drop: ret #0
+** icmp random packet sampling, 1 in 4
+ ldh [12]
+ jne #0x800, drop
+ ldb [23]
+ jneq #1, drop
+ # get a random uint32 number
+ ld rand
+ mod #4
+ jneq #1, drop
+ ret #-1
+ drop: ret #0
+
** SECCOMP filter example:
ld [4] /* offsetof(struct seccomp_data, arch) */
@@ -600,7 +613,7 @@ Some core changes of the new internal format:
Therefore, BPF calling convention is defined as:
- * R0 - return value from in-kernel function
+ * R0 - return value from in-kernel function, and exit value for BPF program
* R1 - R5 - arguments from BPF program to in-kernel function
* R6 - R9 - callee saved registers that in-kernel function will preserve
* R10 - read-only frame pointer to access stack
@@ -646,9 +659,140 @@ Some core changes of the new internal format:
- Introduces bpf_call insn and register passing convention for zero overhead
calls from/to other kernel functions:
- After a kernel function call, R1 - R5 are reset to unreadable and R0 has a
- return type of the function. Since R6 - R9 are callee saved, their state is
- preserved across the call.
+ Before an in-kernel function call, the internal BPF program needs to
+ place function arguments into R1 to R5 registers to satisfy the calling
+ convention; the interpreter will then take them from the registers and pass
+ them to the in-kernel function. If the R1 - R5 registers are mapped to CPU
+ registers that are used for argument passing on the given architecture, the
+ JIT compiler doesn't need to emit extra moves. Function arguments will be in
+ the correct registers and the BPF_CALL instruction will be JITed as a single
+ 'call' HW instruction. This calling convention was picked to cover common
+ call situations without performance penalty.
+
+ After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
+ a return value of the function. Since R6 - R9 are callee saved, their state
+ is preserved across the call.
+
+ For example, consider three C functions:
+
+ u64 f1() { return (*_f2)(1); }
+ u64 f2(u64 a) { return f3(a + 1, a); }
+ u64 f3(u64 a, u64 b) { return a - b; }
+
+ GCC can compile f1, f3 into x86_64:
+
+ f1:
+ movl $1, %edi
+ movq _f2(%rip), %rax
+ jmp *%rax
+ f3:
+ movq %rdi, %rax
+ subq %rsi, %rax
+ ret
+
+ Function f2 in BPF may look like:
+
+ f2:
+ bpf_mov R2, R1
+ bpf_add R1, 1
+ bpf_call f3
+ bpf_exit
+
+ If f2 is JITed and the pointer stored to '_f2', the calls f1 -> f2 -> f3 and
+ the returns will be seamless. Without JIT, the __sk_run_filter() interpreter
+ needs to be used to call into f2.
+
+ For practical reasons all BPF programs have only one argument 'ctx' which is
+ already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
+ can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
+ are currently not supported, but these restrictions can be lifted if necessary
+ in the future.
+
+ On 64-bit architectures all registers map to HW registers one to one. For
+ example, x86_64 JIT compiler can map them as ...
+
+ R0 - rax
+ R1 - rdi
+ R2 - rsi
+ R3 - rdx
+ R4 - rcx
+ R5 - r8
+ R6 - rbx
+ R7 - r13
+ R8 - r14
+ R9 - r15
+ R10 - rbp
+
+ ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
+ and rbx, r12 - r15 are callee saved.
+
+ Then the following internal BPF pseudo-program:
+
+ bpf_mov R6, R1 /* save ctx */
+ bpf_mov R2, 2
+ bpf_mov R3, 3
+ bpf_mov R4, 4
+ bpf_mov R5, 5
+ bpf_call foo
+ bpf_mov R7, R0 /* save foo() return value */
+ bpf_mov R1, R6 /* restore ctx for next call */
+ bpf_mov R2, 6
+ bpf_mov R3, 7
+ bpf_mov R4, 8
+ bpf_mov R5, 9
+ bpf_call bar
+ bpf_add R0, R7
+ bpf_exit
+
+ After JIT to x86_64 it may look like:
+
+ push %rbp
+ mov %rsp,%rbp
+ sub $0x228,%rsp
+ mov %rbx,-0x228(%rbp)
+ mov %r13,-0x220(%rbp)
+ mov %rdi,%rbx
+ mov $0x2,%esi
+ mov $0x3,%edx
+ mov $0x4,%ecx
+ mov $0x5,%r8d
+ callq foo
+ mov %rax,%r13
+ mov %rbx,%rdi
+ mov $0x2,%esi
+ mov $0x3,%edx
+ mov $0x4,%ecx
+ mov $0x5,%r8d
+ callq bar
+ add %r13,%rax
+ mov -0x228(%rbp),%rbx
+ mov -0x220(%rbp),%r13
+ leaveq
+ retq
+
+ In this example, this is equivalent in C to:
+
+ u64 bpf_filter(u64 ctx)
+ {
+ return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
+ }
+
+ In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
+ arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper
+ registers and place their return value into '%rax' which is R0 in BPF.
+ Prologue and epilogue are emitted by the JIT and are implicit in the
+ interpreter. R0-R5 are scratch registers, so the BPF program needs to
+ preserve them across the calls as defined by the calling convention.
+
+ For example the following program is invalid:
+
+ bpf_mov R1, 1
+ bpf_call foo
+ bpf_mov R0, R1
+ bpf_exit
+
+ After the call the registers R1-R5 contain junk values and cannot be read.
+ In the future a BPF verifier can be used to validate internal BPF programs.
Also in the new design, BPF is limited to 4096 insns, which means that any
program will terminate quickly and will only call a fixed number of kernel
@@ -663,6 +807,25 @@ A program, that is translated internally consists of the following elements:
op:16, jt:8, jf:8, k:32 ==> op:8, a_reg:4, x_reg:4, off:16, imm:32
+So far, 87 internal BPF instructions have been implemented. The 8-bit 'op'
+opcode field has room for new instructions, some of which may use 16/24/32
+byte encoding. New instructions must be a multiple of 8 bytes to preserve
+backward compatibility.
+
+Internal BPF is a general purpose RISC instruction set. Not every register
+and not every instruction is used during translation from original BPF to
+the new format. For example, socket filters do not use the 'exclusive add'
+instruction, but tracing filters may, to maintain counters of events.
+Register R9 is not used by socket filters either, but more complex filters
+may run out of registers and would have to resort to spill/fill to stack.
+
+Internal BPF can be used as a generic assembler for last step performance
+optimizations; socket filters and seccomp are already using it as such an
+assembler. Tracing filters may use it as an assembler to generate code from
+within the kernel. In-kernel usage may not be bounded by security
+considerations, since generated internal BPF code may be optimizing an
+internal code path and not be exposed to user space. Safety of internal BPF
+can come from a verifier (TBD). In such use cases it may be used as a safe
+instruction set.
+
Just like the original BPF, the new format runs within a controlled environment,
is deterministic and the kernel can easily prove that. The safety of the program
can be determined in two steps: first step does depth-first-search to disallow
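As a side note on the instruction-format line above (op:16, jt:8, jf:8, k:32 ==>
op:8, a_reg:4, x_reg:4, off:16, imm:32), one plausible C layout of the new
8-byte encoding is sketched below; the struct name and the bitfield packing are
assumptions for illustration, not necessarily the kernel's own declaration.

  #include <stdint.h>

  /* Illustrative layout only: an 8-byte internal BPF instruction following
   * the op:8, a_reg:4, x_reg:4, off:16, imm:32 split described in the text.
   */
  struct ebpf_insn {                  /* hypothetical name */
          uint8_t op;                 /* opcode */
          uint8_t a_reg:4;            /* destination register A */
          uint8_t x_reg:4;            /* source register X */
          int16_t off;                /* signed 16-bit offset */
          int32_t imm;                /* signed 32-bit immediate */
  };

  _Static_assert(sizeof(struct ebpf_insn) == 8, "insn must stay 8 bytes");

Keeping the size at a fixed 8 bytes is what allows future 16/24/32 byte
encodings, mentioned above, to remain multiples of the base instruction.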
diff --git a/MAINTAINERS b/MAINTAINERS
index 7578deb8ff20..bde15ffcccf9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1967,6 +1967,12 @@ S: Maintained
F: drivers/bcma/
F: include/linux/bcma/
+BROADCOM SYSTEMPORT ETHERNET DRIVER
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/broadcom/bcmsysport.*
+
BROCADE BFA FC SCSI DRIVER
M: Anil Gurumurthy <anil.gurumurthy@qlogic.com>
M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index cb6811e5ae5a..baf56cc92040 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -665,6 +665,8 @@
mac: ethernet@4a100000 {
compatible = "ti,cpsw";
ti,hwmods = "cpgmac0";
+ clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ clock-names = "fck", "cpts";
cpdma_channels = <8>;
ale_entries = <1024>;
bd_ram_size = <0x2000>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d1f8707ff1df..03a225505126 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -489,6 +489,8 @@
#address-cells = <1>;
#size-cells = <1>;
ti,hwmods = "cpgmac0";
+ clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ clock-names = "fck", "cpts";
status = "disabled";
cpdma_channels = <8>;
ale_entries = <1024>;
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index 51d0e912c8f5..1929ad390d88 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -165,5 +165,11 @@
reg = <0xd8100000 0x10000>;
interrupts = <48>;
};
+
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
};
};
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index 7525982262ac..b1c59a766a13 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -218,5 +218,11 @@
reg = <0xd8100000 0x10000>;
interrupts = <48>;
};
+
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
};
};
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index d98386dd2882..8fbccfbe75f3 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -298,5 +298,11 @@
bus-width = <4>;
sdon-inverted;
};
+
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
};
};
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index bdbda1453aa9..04471dc64847 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC_CHECKSUM_H) */
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43c..2ff81ae8f3af 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC64_CHECKSUM_H) */
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index e6fd8a026c7b..cd00e1774491 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -184,8 +184,15 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
- : "0" (a), "r" (b));
+ : "0" (a), "rm" (b));
return a;
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ return (__force __wsum)add32_with_carry((__force unsigned)csum,
+ (__force unsigned)addend);
+}
+
#endif /* _ASM_X86_CHECKSUM_64_H */
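For context on the two csum hunks above: defining HAVE_ARCH_CSUM_ADD lets sparc
and x86_64 provide their own csum_add() and suppress the portable fallback in
include/net/checksum.h. A fallback along the following lines is assumed here
for illustration; the asm sequences above (addcc/addx, addl/adcl) perform the
same carry folding in two instructions.

  #ifndef HAVE_ARCH_CSUM_ADD
  static inline __wsum csum_add(__wsum csum, __wsum addend)
  {
          u32 res = (__force u32)csum;

          res += (__force u32)addend;
          /* if the 32-bit add wrapped, fold the carry back into the sum */
          return (__force __wsum)(res + (res < (__force u32)addend));
  }
  #endif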
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 7399303d7d99..dc79f88f8717 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -59,6 +59,8 @@ struct btmrvl_device {
};
struct btmrvl_adapter {
+ void *hw_regs_buf;
+ u8 *hw_regs;
u32 int_count;
struct sk_buff_head tx_queue;
u8 psmode;
@@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 2c4997ce2484..e9dbddb0b8f1 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -24,6 +24,7 @@
#include <net/bluetooth/hci_core.h>
#include "btmrvl_drv.h"
+#include "btmrvl_sdio.h"
#define VERSION "1.0"
@@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
return 0;
}
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
{
int ret;
@@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
static void btmrvl_init_adapter(struct btmrvl_private *priv)
{
+ int buf_size;
+
skb_queue_head_init(&priv->adapter->tx_queue);
priv->adapter->ps_state = PS_AWAKE;
+ buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
+ priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!priv->adapter->hw_regs_buf) {
+ priv->adapter->hw_regs = NULL;
+ BT_ERR("Unable to allocate buffer for hw_regs.");
+ } else {
+ priv->adapter->hw_regs =
+ (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
+ BTSDIO_DMA_ALIGN);
+ BT_DBG("hw_regs_buf=%p hw_regs=%p",
+ priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
+ }
+
init_waitqueue_head(&priv->adapter->cmd_wait_q);
}
@@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
{
skb_queue_purge(&priv->adapter->tx_queue);
+ kfree(priv->adapter->hw_regs_buf);
kfree(priv->adapter);
priv->adapter = NULL;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 1b52c9f5230d..9dedca516ff5 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_0 = 0x00,
.io_port_1 = 0x01,
.io_port_2 = 0x02,
+ .int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
@@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_0 = 0x78,
.io_port_1 = 0x79,
.io_port_2 = 0x7a,
+ .int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
.io_port_0 = 0xd8,
.io_port_1 = 0xd9,
.io_port_2 = 0xda,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x01,
+ .card_misc_cfg = 0xcc,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -667,46 +672,78 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
return 0;
}
-static void btmrvl_sdio_interrupt(struct sdio_func *func)
+static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
{
- struct btmrvl_private *priv;
- struct btmrvl_sdio_card *card;
- ulong flags;
- u8 ireg = 0;
+ struct btmrvl_adapter *adapter = card->priv->adapter;
int ret;
- card = sdio_get_drvdata(func);
- if (!card || !card->priv) {
- BT_ERR("sbi_interrupt(%p) card or priv is "
- "NULL, card=%p\n", func, card);
- return;
+ ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
+ if (ret) {
+ BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
+ return ret;
}
- priv = card->priv;
+ *ireg = adapter->hw_regs[card->reg->host_intstatus];
+ BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
+
+ return 0;
+}
- ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
+static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
+{
+ int ret;
+
+ *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
if (ret) {
- BT_ERR("sdio_readb: read int status register failed");
- return;
+ BT_ERR("sdio_readb: read int status failed: %d", ret);
+ return ret;
}
- if (ireg != 0) {
+ if (*ireg) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
* Clear the interrupt status register and re-enable the
* interrupt.
*/
- BT_DBG("ireg = 0x%x", ireg);
+ BT_DBG("int_status = 0x%x", *ireg);
- sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
- UP_LD_HOST_INT_STATUS),
- card->reg->host_intstatus, &ret);
+ sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
+ UP_LD_HOST_INT_STATUS),
+ card->reg->host_intstatus, &ret);
if (ret) {
- BT_ERR("sdio_writeb: clear int status register failed");
- return;
+ BT_ERR("sdio_writeb: clear int status failed: %d", ret);
+ return ret;
}
}
+ return 0;
+}
+
+static void btmrvl_sdio_interrupt(struct sdio_func *func)
+{
+ struct btmrvl_private *priv;
+ struct btmrvl_sdio_card *card;
+ ulong flags;
+ u8 ireg = 0;
+ int ret;
+
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("sbi_interrupt(%p) card or priv is "
+ "NULL, card=%p\n", func, card);
+ return;
+ }
+
+ priv = card->priv;
+
+ if (card->reg->int_read_to_clear)
+ ret = btmrvl_sdio_read_to_clear(card, &ireg);
+ else
+ ret = btmrvl_sdio_write_to_clear(card, &ireg);
+
+ if (ret)
+ return;
+
spin_lock_irqsave(&priv->driver_lock, flags);
sdio_ireg |= ireg;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
+ if (card->reg->int_read_to_clear) {
+ reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+
+ reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ }
+
sdio_set_drvdata(func, card);
sdio_release_host(func);
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a609ca9..d4dd3b0fa53d 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
u8 io_port_0;
u8 io_port_1;
u8 io_port_2;
+ bool int_read_to_clear;
+ u8 host_int_rsr;
+ u8 card_misc_cfg;
};
struct btmrvl_sdio_card {
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 7048a583fe51..66db9a803373 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -55,13 +55,6 @@ struct h4_struct {
struct sk_buff_head txq;
};
-/* H4 receiver States */
-#define H4_W4_PACKET_TYPE 0
-#define H4_W4_EVENT_HDR 1
-#define H4_W4_ACL_HDR 2
-#define H4_W4_SCO_HDR 3
-#define H4_W4_DATA 4
-
/* Initialize protocol */
static int h4_open(struct hci_uart *hu)
{
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 67c8de572c50..b4877e0ee910 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -110,9 +110,25 @@ static struct ti_dt_clk am43xx_clks[] = {
int __init am43xx_dt_clk_init(void)
{
+ struct clk *clk1, *clk2;
+
ti_dt_clocks_register(am43xx_clks);
omap2_clk_disable_autoidle_all();
+ /*
+ * cpsw_cpts_rft_clk has a choice of 3 clocksources:
+ * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
+ * By default dpll_core_m4_ck is selected; with this as the clock
+ * source the CPTS does not work properly. It gives clockcheck errors
+ * while running PTP:
+ * clockcheck: clock jumped backward or running slower than expected!
+ * Selecting dpll_core_m5_ck as the clocksource fixes this issue.
+ * In AM335x dpll_core_m5_ck is the default clocksource.
+ */
+ clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
+ clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
+ clk_set_parent(clk1, clk2);
+
return 0;
}
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 414dbf6da89a..fc9f9d03fa13 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -197,25 +197,6 @@ typedef struct _hfc4s8s_hw {
-/***************************/
-/* inline function defines */
-/***************************/
-#ifdef HISAX_HFC4S8S_PCIMEM /* inline functions memory mapped */
-
-/* memory write and dummy IO read to avoid PCI byte merge problems */
-#define Write_hfc8(a, b, c) {(*((volatile u_char *)(a->membase + b)) = c); inb(a->iobase + 4);}
-/* memory write without dummy IO access for fifo data access */
-#define fWrite_hfc8(a, b, c) (*((volatile u_char *)(a->membase + b)) = c)
-#define Read_hfc8(a, b) (*((volatile u_char *)(a->membase + b)))
-#define Write_hfc16(a, b, c) (*((volatile unsigned short *)(a->membase + b)) = c)
-#define Read_hfc16(a, b) (*((volatile unsigned short *)(a->membase + b)))
-#define Write_hfc32(a, b, c) (*((volatile unsigned long *)(a->membase + b)) = c)
-#define Read_hfc32(a, b) (*((volatile unsigned long *)(a->membase + b)))
-#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
-#define PCI_ENA_MEMIO 0x03
-
-#else
-
/* inline functions io mapped */
static inline void
SetRegAddr(hfc4s8s_hw *a, u_char b)
@@ -306,8 +287,6 @@ wait_busy(hfc4s8s_hw *a)
#define PCI_ENA_REGIO 0x01
-#endif /* HISAX_HFC4S8S_PCIMEM */
-
/******************************************************/
/* function to read critical counter registers that */
/* may be updated by the chip during read */
@@ -724,26 +703,15 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
return;
} else {
/* read erroneous D frame */
-
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
fRead_hfc32(l1p->hw);
-#endif
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
- fRead_hfc8(l1p->hw);
-#endif
+ fRead_hfc8(l1p->hw);
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
wait_busy(l1p->hw);
@@ -753,27 +721,16 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
cp = skb->data;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- *((unsigned long *) cp) =
- Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
-#endif
cp += 4;
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- *cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
- *cp++ = fRead_hfc8(l1p->hw);
-#endif
+ *cp++ = fRead_hfc8(l1p->hw);
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
wait_busy(l1p->hw);
@@ -859,28 +816,17 @@ rx_b_frame(struct hfc4s8s_btype *bch)
wait_busy(l1->hw);
return;
}
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- *((unsigned long *) bch->rx_ptr) =
- Read_hfc32(l1->hw, A_FIFO_DATA0);
-#else
*((unsigned long *) bch->rx_ptr) =
fRead_hfc32(l1->hw);
-#endif
bch->rx_ptr += 4;
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- *(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
-#else
- *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-#endif
+ *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
if (hdlc_complete) {
/* increment f counter */
@@ -940,29 +886,17 @@ tx_d_frame(struct hfc4s8s_l1 *l1p)
if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
cp = skb->data;
cnt = skb->len;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
- *(unsigned long *) cp);
-#else
SetRegAddr(l1p->hw, A_FIFO_DATA0);
fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
-#endif
cp += 4;
cnt -= 4;
}
-#ifdef HISAX_HFC4S8S_PCIMEM
- while (cnt--)
- fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
-#else
while (cnt--)
fWrite_hfc8(l1p->hw, *cp++);
-#endif
l1p->tx_cnt = skb->truesize;
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
@@ -1037,26 +971,15 @@ tx_b_frame(struct hfc4s8s_btype *bch)
cp = skb->data + bch->tx_cnt;
bch->tx_cnt += cnt;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc32(l1->hw, A_FIFO_DATA0,
- *(unsigned long *) cp);
-#else
fWrite_hfc32(l1->hw, *(unsigned long *) cp);
-#endif
cp += 4;
cnt -= 4;
}
while (cnt--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
-#else
- fWrite_hfc8(l1->hw, *cp++);
-#endif
+ fWrite_hfc8(l1->hw, *cp++);
if (bch->tx_cnt >= skb->len) {
if (bch->mode == L1_MODE_HDLC) {
@@ -1281,10 +1204,8 @@ hfc4s8s_interrupt(int intno, void *dev_id)
if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
return IRQ_NONE;
-#ifndef HISAX_HFC4S8S_PCIMEM
/* read currently selected register */
old_ioreg = GetRegAddr(hw);
-#endif
/* Layer 1 State change */
hw->mr.r_irq_statech |=
@@ -1292,9 +1213,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
if (!
(b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
&& !hw->mr.r_irq_statech) {
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
-#endif
return IRQ_NONE;
}
@@ -1322,9 +1241,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
/* queue the request to allow other cards to interrupt */
schedule_work(&hw->tqueue);
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
-#endif
return IRQ_HANDLED;
} /* hfc4s8s_interrupt */
@@ -1471,13 +1388,8 @@ static void
release_pci_ports(hfc4s8s_hw *hw)
{
pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
- if (hw->membase)
- iounmap((void *) hw->membase);
-#else
if (hw->iobase)
release_region(hw->iobase, 8);
-#endif
}
/*****************************************/
@@ -1486,11 +1398,7 @@ release_pci_ports(hfc4s8s_hw *hw)
static void
enable_pci_ports(hfc4s8s_hw *hw)
{
-#ifdef HISAX_HFC4S8S_PCIMEM
- pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
-#else
pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-#endif
}
/*************************************/
@@ -1561,15 +1469,9 @@ setup_instance(hfc4s8s_hw *hw)
hw->irq);
goto out;
}
-#ifdef HISAX_HFC4S8S_PCIMEM
- printk(KERN_INFO
- "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
- hw->hw_membase, hw->irq);
-#else
printk(KERN_INFO
"HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
hw->iobase, hw->irq);
-#endif
hfc_hardware_enable(hw, 1, 0);
@@ -1614,17 +1516,12 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->irq = pdev->irq;
hw->iobase = pci_resource_start(pdev, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
- hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
- hw->membase = ioremap((ulong) hw->hw_membase, 256);
-#else
if (!request_region(hw->iobase, 8, hw->card_name)) {
printk(KERN_INFO
"HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
goto out;
}
-#endif
pci_set_drvdata(pdev, hw);
err = setup_instance(hw);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b667a51ed215..9a0d61e0c188 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2440,7 +2440,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto err_free;
}
- slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
first_ok_slave = NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..70de039dad2e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1347,6 +1347,77 @@ void bond_alb_deinitialize(struct bonding *bond)
rlb_deinitialize(bond);
}
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+ struct slave *tx_slave)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct ethhdr *eth_data = eth_hdr(skb);
+
+ if (!tx_slave) {
+ /* unbalanced or unassigned, send through primary */
+ tx_slave = rcu_dereference(bond->curr_active_slave);
+ if (bond->params.tlb_dynamic_lb)
+ bond_info->unbalanced_load += skb->len;
+ }
+
+ if (tx_slave && SLAVE_IS_OK(tx_slave)) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
+ }
+
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
+ }
+
+ if (tx_slave && bond->params.tlb_dynamic_lb) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
+ }
+
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb_any(skb);
+out:
+ return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct ethhdr *eth_data;
+ struct slave *tx_slave = NULL;
+ u32 hash_index;
+
+ skb_reset_mac_header(skb);
+ eth_data = eth_hdr(skb);
+
+ /* Do not TX balance any multicast or broadcast */
+ if (!is_multicast_ether_addr(eth_data->h_dest)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPX):
+ /* In case of IPX, it will fall back to L2 hash */
+ case htons(ETH_P_IPV6):
+ hash_index = bond_xmit_hash(bond, skb);
+ if (bond->params.tlb_dynamic_lb) {
+ tx_slave = tlb_choose_channel(bond,
+ hash_index & 0xFF,
+ skb->len);
+ } else {
+ struct list_head *iter;
+ int idx = hash_index % bond->slave_cnt;
+
+ bond_for_each_slave_rcu(bond, tx_slave, iter)
+ if (--idx < 0)
+ break;
+ }
+ break;
+ }
+ }
+ return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1355,7 +1426,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
int hash_size = 0;
- int do_tx_balance = 1;
+ bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
struct ipv6hdr *ip6hdr;
@@ -1370,7 +1441,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
(iph->daddr == ip_bcast) ||
(iph->protocol == IPPROTO_IGMP)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
@@ -1382,7 +1453,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* that here just in case.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1390,7 +1461,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* broadcasts in IPv4.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1400,7 +1471,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
*/
ip6hdr = ipv6_hdr(skb);
if (ipv6_addr_any(&ip6hdr->saddr)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1410,7 +1481,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
case ETH_P_IPX:
if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1419,7 +1490,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* this family since it has an "ARP" like
* mechanism
*/
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1427,12 +1498,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
- do_tx_balance = 0;
+ do_tx_balance = false;
if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
break;
default:
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1441,32 +1512,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
}
- if (!tx_slave) {
- /* unbalanced or unassigned, send through primary */
- tx_slave = rcu_dereference(bond->curr_active_slave);
- bond_info->unbalanced_load += skb->len;
- }
-
- if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- ether_addr_copy(eth_data->h_source,
- tx_slave->dev->dev_addr);
- }
-
- bond_dev_queue_xmit(bond, skb, tx_slave->dev);
- goto out;
- }
-
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
-
- /* no suitable interface, frame not sent */
- dev_kfree_skb_any(skb);
-out:
- return NETDEV_TX_OK;
+ return bond_do_alb_xmit(skb, bond, tx_slave);
}
void bond_alb_monitor(struct work_struct *work)
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e09dd4bfafff..5fc76c01636c 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..a1741cb23100 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2291,8 +2291,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
- !slave_do_arp_validate_only(bond, slave))
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+ !slave_do_arp_validate_only(bond))
slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
} else if (!is_arp) {
@@ -3015,20 +3015,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
* @skb: buffer to use for headers
- * @count: modulo value
*
* This function will extract the necessary headers from the skb buffer and use
* them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
*/
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
struct flow_keys flow;
u32 hash;
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
- return bond_eth_hash(skb) % count;
+ return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3039,7 +3037,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash % count;
+ return hash;
}
/*-------------------------- Device entry points ----------------------------*/
@@ -3098,7 +3096,8 @@ static int bond_open(struct net_device *bond_dev)
*/
if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
return -ENOMEM;
- queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ if (bond->params.tlb_dynamic_lb)
+ queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
if (bond->params.miimon) /* link check interval, in milliseconds. */
@@ -3666,7 +3665,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
return NETDEV_TX_OK;
}
@@ -3776,8 +3775,9 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_8023AD:
return bond_3ad_xmit_xor(skb, dev);
case BOND_MODE_ALB:
- case BOND_MODE_TLB:
return bond_alb_xmit(skb, dev);
+ case BOND_MODE_TLB:
+ return bond_tlb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
pr_err("%s: Error: Unknown bonding mode %d\n",
@@ -3998,7 +3998,8 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
- (bond_mode != BOND_MODE_8023AD)) {
+ (bond_mode != BOND_MODE_8023AD) &&
+ (bond_mode != BOND_MODE_TLB)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
@@ -4304,6 +4305,7 @@ static int bond_check_params(struct bond_params *params)
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
+ params->tlb_dynamic_lb = 1; /* Default value */
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
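bond_xmit_hash() now returns the raw 32-bit hash and the modulo has moved into the callers, as the xor and 802.3ad hunks show. A minimal sketch of the new calling convention (the modulus is whatever the caller needs, e.g. bond->slave_cnt here or slaves_in_agg in 802.3ad):

	u32 hash = bond_xmit_hash(bond, skb);

	/* reduce to a slave index only where one is actually needed */
	bond_xmit_slave_id(bond, skb, hash % bond->slave_cnt);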
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..6dc49da106d6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -179,6 +181,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -199,7 +207,7 @@ static const struct bond_option bond_opts[] = {
[BOND_OPT_XMIT_HASH] = {
.id = BOND_OPT_XMIT_HASH,
.name = "xmit_hash_policy",
- .desc = "balance-xor and 802.3ad hashing method",
+ .desc = "balance-xor, 802.3ad, and tlb hashing method",
.values = bond_xmit_hashtype_tbl,
.set = bond_option_xmit_hash_policy_set
},
@@ -364,9 +372,33 @@ static const struct bond_option bond_opts[] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_slaves_set
},
+ [BOND_OPT_TLB_DYNAMIC_LB] = {
+ .id = BOND_OPT_TLB_DYNAMIC_LB,
+ .name = "tlb_dynamic_lb",
+ .desc = "Enable dynamic flow shuffling",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+ .values = bond_tlb_dynamic_lb_tbl,
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .set = bond_option_tlb_dynamic_lb_set,
+ },
{ }
};
+/* Searches for an option by name */
+const struct bond_option *bond_opt_get_by_name(const char *name)
+{
+ const struct bond_option *opt;
+ int option;
+
+ for (option = 0; option < BOND_OPT_LAST; option++) {
+ opt = bond_opt_get(option);
+ if (opt && !strcmp(opt->name, name))
+ return opt;
+ }
+
+ return NULL;
+}
+
/* Searches for a value in opt's values[] table */
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
@@ -745,6 +777,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
return ret;
}
+/* There are two tricky bits here. First, if MII monitoring is activated, then
+ * we must disable ARP monitoring. Second, if the timer isn't running, we must
+ * start it.
+ */
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -783,6 +819,10 @@ static int bond_option_miimon_set(struct bonding *bond,
return 0;
}
+/* Set up and down delays. These must be multiples of the
+ * MII monitoring value, and are stored internally as the multiplier.
+ * Thus, we must translate to MS for the real world.
+ */
static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -841,6 +881,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
return 0;
}
+/* There are two tricky bits here. First, if ARP monitoring is activated, then
+ * we must disable MII monitoring. Second, if the ARP timer isn't running,
+ * we must start it.
+ */
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1337,3 +1381,13 @@ err_no_cmd:
ret = -EPERM;
goto out;
}
+
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.tlb_dynamic_lb = newval->value;
+
+ return 0;
+}
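The new comments above the miimon and delay setters spell out that up/down delays are stored as multiples of miimon. A minimal sketch of the translation (requested_ms is a placeholder for the value written by the user; multiplying back is exactly what the show functions do):

	/* what sysfs prints, in milliseconds */
	int shown_ms = bond->params.updelay * bond->params.miimon;

	/* what would be stored for a user-supplied value, rounded down */
	int stored_multiplier = requested_ms / bond->params.miimon;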
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 12be9e1bfb0c..17ded5b29176 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -62,6 +62,7 @@ enum {
BOND_OPT_RESEND_IGMP,
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
+ BOND_OPT_TLB_DYNAMIC_LB,
BOND_OPT_LAST
};
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
struct bond_opt_value *val);
const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
/* This helper is used to initialize a bond_opt_value structure for parameter
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f6babcfc26e..39c4d8d61074 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -45,8 +45,7 @@
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
-/*
- * "show" function for the bond_masters attribute.
+/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
return NULL;
}
-/*
- * "store" function for the bond_masters attribute. This is what
+/* "store" function for the bond_masters attribute. This is what
* creates and deletes entire bonds.
*
* The class parameter is ignored.
*
*/
-
static ssize_t bonding_store_bonds(struct class *cls,
struct class_attribute *attr,
const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
.store = bonding_store_bonds,
};
-/*
- * Show the slaves in the current bond.
- */
+/* Generic "store" method for bonding sysfs option setting */
+static ssize_t bonding_sysfs_store_option(struct device *d,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ const struct bond_option *opt;
+ int ret;
+
+ opt = bond_opt_get_by_name(attr->attr.name);
+ if (WARN_ON(!opt))
+ return -ENOENT;
+ ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
+/* Show the slaves in the current bond. */
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -190,32 +204,10 @@ static ssize_t bonding_show_slaves(struct device *d,
return res;
}
-
-/*
- * Set the slaves in the current bond.
- * This is supposed to be only thin wrapper for bond_enslave and bond_release.
- * All hard work should be done there.
- */
-static ssize_t bonding_store_slaves(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
- bonding_store_slaves);
+ bonding_sysfs_store_option);
-/*
- * Show and set the bonding mode. The bond interface must be down to
- * change the mode.
- */
+/* Show the bonding mode. */
static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -226,26 +218,10 @@ static ssize_t bonding_show_mode(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
}
-
-static ssize_t bonding_store_mode(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- bonding_show_mode, bonding_store_mode);
+ bonding_show_mode, bonding_sysfs_store_option);
-/*
- * Show and set the bonding transmit hash method.
- */
+/* Show the bonding transmit hash method. */
static ssize_t bonding_show_xmit_hash(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
-
-static ssize_t bonding_store_xmit_hash(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
- bonding_show_xmit_hash, bonding_store_xmit_hash);
+ bonding_show_xmit_hash, bonding_sysfs_store_option);
-/*
- * Show and set arp_validate.
- */
+/* Show arp_validate. */
static ssize_t bonding_show_arp_validate(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
-
-static ssize_t bonding_store_arp_validate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
- bonding_store_arp_validate);
-/*
- * Show and set arp_all_targets.
- */
+ bonding_sysfs_store_option);
+
+/* Show arp_all_targets. */
static ssize_t bonding_show_arp_all_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
-
-static ssize_t bonding_store_arp_all_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
- bonding_show_arp_all_targets, bonding_store_arp_all_targets);
+ bonding_show_arp_all_targets, bonding_sysfs_store_option);
-/*
- * Show and store fail_over_mac. User only allowed to change the
- * value when there are no slaves.
- */
+/* Show fail_over_mac. */
static ssize_t bonding_show_fail_over_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
-
-static ssize_t bonding_store_fail_over_mac(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
- bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+ bonding_show_fail_over_mac, bonding_sysfs_store_option);
-/*
- * Show and set the arp timer interval. There are two tricky bits
- * here. First, if ARP monitoring is activated, then we must disable
- * MII monitoring. Second, if the ARP timer isn't running, we must
- * start it.
- */
+/* Show the arp timer interval. */
static ssize_t bonding_show_arp_interval(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
return sprintf(buf, "%d\n", bond->params.arp_interval);
}
-
-static ssize_t bonding_store_arp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
- bonding_show_arp_interval, bonding_store_arp_interval);
+ bonding_show_arp_interval, bonding_sysfs_store_option);
-/*
- * Show and set the arp targets.
- */
+/* Show the arp targets. */
static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
return res;
}
+static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+ bonding_show_arp_targets, bonding_sysfs_store_option);
-static ssize_t bonding_store_arp_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
-
-/*
- * Show and set the up and down delays. These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
- */
+/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_downdelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
- bonding_show_downdelay, bonding_store_downdelay);
+ bonding_show_downdelay, bonding_sysfs_store_option);
static ssize_t bonding_show_updelay(struct device *d,
struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_updelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
- bonding_show_updelay, bonding_store_updelay);
+ bonding_show_updelay, bonding_sysfs_store_option);
-/*
- * Show and set the LACP interval. Interface must be down, and the mode
- * must be set to 802.3ad mode.
- */
+/* Show the LACP interval. */
static ssize_t bonding_show_lacp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
-
-static ssize_t bonding_store_lacp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
- bonding_show_lacp, bonding_store_lacp);
+ bonding_show_lacp, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
struct device_attribute *attr,
@@ -536,22 +364,8 @@ static ssize_t bonding_show_min_links(struct device *d,
return sprintf(buf, "%u\n", bond->params.min_links);
}
-
-static ssize_t bonding_store_min_links(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
- bonding_show_min_links, bonding_store_min_links);
+ bonding_show_min_links, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
-
-
-static ssize_t bonding_store_ad_select(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
- bonding_show_ad_select, bonding_store_ad_select);
+ bonding_show_ad_select, bonding_sysfs_store_option);
-/*
- * Show and set the number of peer notifications to send after a failover event.
- */
+/* Show and set the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
-/*
- * Show and set the MII monitor interval. There are two tricky bits
- * here. First, if MII monitoring is activated, then we must disable
- * ARP monitoring. Second, if the timer isn't running, we must
- * start it.
- */
+/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
return sprintf(buf, "%d\n", bond->params.miimon);
}
-
-static ssize_t bonding_store_miimon(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
- bonding_show_miimon, bonding_store_miimon);
+ bonding_show_miimon, bonding_sysfs_store_option);
-/*
- * Show and set the primary slave. The store function is much
- * simpler than bonding_store_slaves function because it only needs to
- * handle one interface name.
- * The bond must be a mode that supports a primary for this be
- * set.
- */
+/* Show the primary slave. */
static ssize_t bonding_show_primary(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
return count;
}
-
-static ssize_t bonding_store_primary(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
- bonding_show_primary, bonding_store_primary);
+ bonding_show_primary, bonding_sysfs_store_option);
-/*
- * Show and set the primary_reselect flag.
- */
+/* Show the primary_reselect flag. */
static ssize_t bonding_show_primary_reselect(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.primary_reselect);
}
-
-static ssize_t bonding_store_primary_reselect(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
- bonding_show_primary_reselect,
- bonding_store_primary_reselect);
+ bonding_show_primary_reselect, bonding_sysfs_store_option);
-/*
- * Show and set the use_carrier flag.
- */
+/* Show the use_carrier flag. */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
return sprintf(buf, "%d\n", bond->params.use_carrier);
}
-
-static ssize_t bonding_store_carrier(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
- bonding_show_carrier, bonding_store_carrier);
+ bonding_show_carrier, bonding_sysfs_store_option);
-/*
- * Show and set currently active_slave.
- */
+/* Show currently active_slave. */
static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
return count;
}
-
-static ssize_t bonding_store_active_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
- bonding_show_active_slave, bonding_store_active_slave);
-
+ bonding_show_active_slave, bonding_sysfs_store_option);
-/*
- * Show link status of the bond interface.
- */
+/* Show link status of the bond interface. */
static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
-/*
- * Show current 802.3ad aggregator ID.
- */
+/* Show current 802.3ad aggregator ID. */
static ssize_t bonding_show_ad_aggregator(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
-/*
- * Show number of active 802.3ad ports.
- */
+/* Show number of active 802.3ad ports. */
static ssize_t bonding_show_ad_num_ports(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
-/*
- * Show current 802.3ad actor key.
- */
+/* Show current 802.3ad actor key. */
static ssize_t bonding_show_ad_actor_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
-/*
- * Show current 802.3ad partner key.
- */
+/* Show current 802.3ad partner key. */
static ssize_t bonding_show_ad_partner_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
-/*
- * Show current 802.3ad partner mac.
- */
+/* Show current 802.3ad partner mac. */
static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
}
static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
-/*
- * Show the queue_ids of the slaves in the current bond.
- */
+/* Show the queue_ids of the slaves in the current bond. */
static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
return res;
}
-
-/*
- * Set the queue_ids of the slaves in the current bond. The bond
- * interface must be enslaved for this to work.
- */
-static ssize_t bonding_store_queue_id(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
- bonding_store_queue_id);
+ bonding_sysfs_store_option);
-/*
- * Show and set the all_slaves_active flag.
- */
+/* Show the all_slaves_active flag. */
static ssize_t bonding_show_slaves_active(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
return sprintf(buf, "%d\n", bond->params.all_slaves_active);
}
-
-static ssize_t bonding_store_slaves_active(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
- bonding_show_slaves_active, bonding_store_slaves_active);
+ bonding_show_slaves_active, bonding_sysfs_store_option);
-/*
- * Show and set the number of IGMP membership reports to send on link failure
- */
+/* Show the number of IGMP membership reports to send on link failure */
static ssize_t bonding_show_resend_igmp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
return sprintf(buf, "%d\n", bond->params.resend_igmp);
}
-
-static ssize_t bonding_store_resend_igmp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
- bonding_show_resend_igmp, bonding_store_resend_igmp);
+ bonding_show_resend_igmp, bonding_sysfs_store_option);
static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+
return sprintf(buf, "%d\n", bond->params.lp_interval);
}
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_lp_interval, bonding_sysfs_store_option);
-static ssize_t bonding_store_lp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
-
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
- bonding_show_lp_interval, bonding_store_lp_interval);
+static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+ bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
static ssize_t bonding_show_packets_per_slave(struct device *d,
struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
-}
-
-static ssize_t bonding_store_packets_per_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%u\n", packets_per_slave);
}
-
static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
- bonding_show_packets_per_slave,
- bonding_store_packets_per_slave);
+ bonding_show_packets_per_slave, bonding_sysfs_store_option);
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
+ &dev_attr_tlb_dynamic_lb.attr,
NULL,
};
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
.attrs = per_bond_attrs,
};
-/*
- * Initialize sysfs. This sets up the bonding_masters file in
+/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
bn->net);
- /*
- * Permit multiple loads of the module by ignoring failures to
+ /* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
* created by second or subsequent loads of the module will
* not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
}
-/*
- * Remove /sys/class/net/bonding_masters.
- */
+/* Remove /sys/class/net/bonding_masters. */
void bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
-/*
- * Initialize sysfs for each bond. This sets up and registers
+/* Initialize sysfs for each bond. This sets up and registers
* the 'bondctl' directory for each individual bond under /sys/class/net.
*/
void bond_prepare_sysfs_group(struct bonding *bond)
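With the generic store helper in place, adding another writable tunable only requires a show function plus a matching entry in bond_opts[]; the attribute name must equal the option's .name so bond_opt_get_by_name() can resolve it. A minimal sketch using a made-up parameter called "foo":

static ssize_t bonding_show_foo(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct bonding *bond = to_bond(d);

	/* bond->params.foo is hypothetical; a real option also needs a
	 * BOND_OPT_FOO entry and setter in bond_options.c
	 */
	return sprintf(buf, "%d\n", bond->params.foo);
}
static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR,
		   bonding_show_foo, bonding_sysfs_store_option);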
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..1621226b8297 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -174,6 +174,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
+ int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
};
@@ -182,8 +183,6 @@ struct bond_parm_tbl {
int mode;
};
-#define BOND_MAX_MODENAME_LEN 20
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -395,8 +394,7 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
-static inline int slave_do_arp_validate_only(struct bonding *bond,
- struct slave *slave)
+static inline int slave_do_arp_validate_only(struct bonding *bond)
{
return bond->params.arp_validate & BOND_ARP_FILTER;
}
@@ -487,7 +485,14 @@ static inline bool slave_can_tx(struct slave *slave)
return false;
}
-struct bond_net;
+struct bond_net {
+ struct net *net; /* Associated network namespace */
+ struct list_head dev_list;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_dir;
+#endif
+ struct class_attribute class_attr_bonding_masters;
+};
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -499,7 +504,7 @@ int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -516,15 +521,6 @@ struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
-struct bond_net {
- struct net * net; /* Associated network namespace */
- struct list_head dev_list;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry * proc_dir;
-#endif
- struct class_attribute class_attr_bonding_masters;
-};
-
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
void bond_remove_proc_entry(struct bonding *bond);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95dae2c7..4aacaa9b478a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -77,12 +77,6 @@ config CAN_TI_HECC
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
-config CAN_MCP251X
- tristate "Microchip MCP251x SPI CAN controllers"
- depends on SPI && HAS_DMA
- ---help---
- Driver for the Microchip MCP251x SPI CAN controllers.
-
config CAN_BFIN
depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
@@ -133,6 +127,8 @@ source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/spi/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..c42058868b0f 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
+obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -19,7 +20,6 @@ obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
-obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index fe5f6303b584..58f71e1fcc4e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -19,9 +19,13 @@
#include "c_can.h"
+#define PCI_DEVICE_ID_PCH_CAN 0x8818
+#define PCH_PCI_SOFT_RESET 0x01fc
+
enum c_can_pci_reg_align {
C_CAN_REG_ALIGN_16,
C_CAN_REG_ALIGN_32,
+ C_CAN_REG_32,
};
struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
enum c_can_pci_reg_align reg_align;
/* Set the frequency */
unsigned int freq;
+ /* PCI bar number */
+ int bar;
+ /* Callback for reset */
+ void (*init)(const struct c_can_priv *priv, bool enable);
};
/*
@@ -63,6 +71,29 @@ static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
+static u16 c_can_pci_read_reg_32bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return (u16)ioread32(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_32bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
+{
+ if (enable) {
+ u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
+
+ /* write to sw reset register */
+ iowrite32(1, addr);
+ iowrite32(0, addr);
+ }
+}
+
static int c_can_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -90,7 +121,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
}
- addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ addr = pci_iomap(pdev, c_can_pci_data->bar,
+ pci_resource_len(pdev, c_can_pci_data->bar));
if (!addr) {
dev_err(&pdev->dev,
"device has no PCI memory resources, "
@@ -147,11 +179,17 @@ static int c_can_pci_probe(struct pci_dev *pdev,
priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
break;
+ case C_CAN_REG_32:
+ priv->read_reg = c_can_pci_read_reg_32bit;
+ priv->write_reg = c_can_pci_write_reg_32bit;
+ break;
default:
ret = -EINVAL;
goto out_free_c_can;
}
+ priv->raminit = c_can_pci_data->init;
+
ret = register_c_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -198,6 +236,15 @@ static struct c_can_pci_data c_can_sta2x11= {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
.freq = 52000000, /* 52 MHz */
+ .bar = 0,
+};
+
+static struct c_can_pci_data c_can_pch = {
+ .type = BOSCH_C_CAN,
+ .reg_align = C_CAN_REG_32,
+ .freq = 50000000, /* 50 MHz */
+ .init = c_can_pci_reset_pch,
+ .bar = 1,
};
#define C_CAN_ID(_vend, _dev, _driverdata) { \
@@ -207,6 +254,8 @@ static struct c_can_pci_data c_can_sta2x11= {
static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
c_can_sta2x11),
+ C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
+ c_can_pch),
{},
};
static struct pci_driver c_can_pci_driver = {
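Because each c_can_pci_data now carries its BAR index and an optional reset callback, supporting another board is mostly a matter of filling in a descriptor; a minimal sketch with made-up values (only the field names come from the structure above):

static struct c_can_pci_data c_can_example = {
	.type		= BOSCH_C_CAN,
	.reg_align	= C_CAN_REG_32,
	.freq		= 50000000,	/* hypothetical 50 MHz input clock */
	.init		= NULL,		/* no soft-reset hook required */
	.bar		= 0,
};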
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7d8c8f3672dd..bacd236ce306 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -556,15 +556,6 @@ failed:
/*
* netdev sysfs
*/
-static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *ndev = to_net_dev(dev);
- struct softing_priv *priv = netdev2softing(ndev);
-
- return sprintf(buf, "%i\n", priv->index);
-}
-
static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
return count;
}
-static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
static const struct attribute *const netdev_sysfs_attrs[] = {
- &dev_attr_channel.attr,
&dev_attr_chip.attr,
&dev_attr_output.attr,
NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
{
int ret;
- netdev->sysfs_groups[0] = &netdev_sysfs_group;
ret = register_candev(netdev);
if (ret) {
dev_alert(&netdev->dev, "register failed\n");
return ret;
}
+ if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
+ netdev_alert(netdev, "sysfs group failed\n");
+
return 0;
}
static void softing_netdev_cleanup(struct net_device *netdev)
{
+ sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
unregister_candev(netdev);
free_candev(netdev);
}
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
DEV_ATTR_RO_STR(hardware, pdat->name);
DEV_ATTR_RO(hardware_version, id.hw_version);
DEV_ATTR_RO(license, id.license);
-DEV_ATTR_RO(frequency, id.freq);
-DEV_ATTR_RO(txpending, tx.pending);
static struct attribute *softing_pdev_attrs[] = {
&dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
&dev_attr_hardware.attr,
&dev_attr_hardware_version.attr,
&dev_attr_license.attr,
- &dev_attr_frequency.attr,
- &dev_attr_txpending.attr,
NULL,
};
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 000000000000..148cae5871a6
--- /dev/null
+++ b/drivers/net/can/spi/Kconfig
@@ -0,0 +1,10 @@
+menu "CAN SPI interfaces"
+ depends on SPI
+
+config CAN_MCP251X
+ tristate "Microchip MCP251x SPI CAN controllers"
+ depends on HAS_DMA
+ ---help---
+ Driver for the Microchip MCP251x SPI CAN controllers.
+
+endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 000000000000..90bcacffbc65
--- /dev/null
+++ b/drivers/net/can/spi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux Controller Area Network SPI drivers.
+#
+
+
+obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 28c11f815245..bc235f9dc754 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -214,6 +214,8 @@
#define TX_ECHO_SKB_MAX 1
+#define MCP251X_OST_DELAY_MS (5)
+
#define DEVICE_NAME "mcp251x"
static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
@@ -624,50 +626,45 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ u8 reg;
int ret;
- unsigned long timeout;
+
+ /* Wait for oscillator startup timer after power up */
+ mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
- ret = spi_write(spi, priv->spi_tx_buf, 1);
- if (ret) {
- dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
- return -EIO;
- }
+ ret = mcp251x_spi_trans(spi, 1);
+ if (ret)
+ return ret;
+
+ /* Wait for oscillator startup timer after reset */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ reg = mcp251x_read_reg(spi, CANSTAT);
+ if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+ return -ENODEV;
- /* Wait for reset to finish */
- timeout = jiffies + HZ;
- mdelay(10);
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
- != CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't"
- " enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
{
- int st1, st2;
+ u8 ctrl;
+ int ret;
- mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_reset(spi);
+ if (ret)
+ return ret;
- /*
- * Please note that these are "magic values" based on after
- * reset defaults taken from data sheet which allows us to see
- * if we really have a chip on the bus (we avoid common all
- * zeroes or all ones situations)
- */
- st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
- st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
+ ctrl = mcp251x_read_reg(spi, CANCTRL);
+
+ dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
- dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
+ /* Check for power up default value */
+ if ((ctrl & 0x17) != 0x07)
+ return -ENODEV;
- /* Check for power up default values */
- return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
+ return 0;
}
static int mcp251x_power_enable(struct regulator *reg, int enable)
@@ -776,7 +773,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mdelay(10);
mcp251x_hw_reset(spi);
mcp251x_setup(net, priv, spi);
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
@@ -1032,8 +1028,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- int freq, ret = -ENODEV;
struct clk *clk;
+ int freq, ret;
clk = devm_clk_get(&spi->dev, NULL);
if (IS_ERR(clk)) {
@@ -1076,6 +1072,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->net = net;
priv->clk = clk;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
@@ -1088,8 +1096,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- spi_set_drvdata(spi, priv);
-
priv->spi = spi;
mutex_init(&priv->mcp_lock);
@@ -1134,20 +1140,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- /* Configure the SPI bus */
- spi->mode = spi->mode ? : SPI_MODE_0;
- if (mcp251x_is_2510(spi))
- spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
- else
- spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
- spi->bits_per_word = 8;
- spi_setup(spi);
-
/* Here is OK to not lock the MCP, no one knows about it yet */
- if (!mcp251x_hw_probe(spi)) {
- ret = -ENODEV;
+ ret = mcp251x_hw_probe(spi);
+ if (ret)
goto error_probe;
- }
+
mcp251x_hw_sleep(spi);
ret = register_candev(net);
@@ -1156,7 +1153,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
- return ret;
+ return 0;
error_probe:
if (mcp251x_enable_dma)
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index fc96a3d83ebe..0b918ebad76b 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -19,7 +19,7 @@ config CAN_KVASER_USB
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light.
- The driver gives support for the following devices:
+ The driver provides support for the following devices:
- Kvaser Leaf Light
- Kvaser Leaf Professional HS
- Kvaser Leaf SemiPro HS
@@ -36,6 +36,8 @@ config CAN_KVASER_USB
- Kvaser Leaf Light "China"
- Kvaser BlackBird SemiPro
- Kvaser USBcan R
+ - Kvaser Leaf Light v2
+ - Kvaser Mini PCI Express HS
If unsure, say N.
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4ca46edc061d..541fb7a05625 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -53,6 +53,8 @@
#define USB_OEM_MERCURY_PRODUCT_ID 34
#define USB_OEM_LEAF_PRODUCT_ID 35
#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
/* USB devices features */
#define KVASER_HAS_SILENT_MODE BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
.driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
void *buf;
int actual_len;
int err;
- int pos = 0;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
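+ /* Retry the bulk read and rescan the received messages until the
+ * one with the expected id arrives or the overall timeout expires.
+ */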
+ do {
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, RX_BUFFER_SIZE, &actual_len,
+ USB_RECV_TIMEOUT);
+ if (err < 0)
+ goto end;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
+ pos = 0;
+ while (pos <= actual_len - MSG_HEADER_LEN) {
+ tmp = buf + pos;
- if (!tmp->len)
- break;
+ if (!tmp->len)
+ break;
- if (pos + tmp->len > actual_len) {
- dev_err(dev->udev->dev.parent, "Format error\n");
- break;
- }
+ if (pos + tmp->len > actual_len) {
+ dev_err(dev->udev->dev.parent,
+ "Format error\n");
+ break;
+ }
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
+ if (tmp->id == id) {
+ memcpy(msg, tmp, tmp->len);
+ goto end;
+ }
- pos += tmp->len;
- }
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
err = -EINVAL;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 41ee5b6ae917..69c42513dd72 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index dadfafba64e9..953bc6a49e59 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = REG_PORT(p);
u16 val;
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
static int mv88e6131_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 17314ed9456d..9ce2146346b6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
mutex_init(&ps->ppu_mutex);
INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
int i;
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 4d1f2fdd5c32..38c500f95b9e 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,10 +37,8 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
void msgdma_reset(struct altera_tse_private *priv)
{
int counter;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
+ struct msgdma_csr *txcsr = priv->tx_dma_csr;
+ struct msgdma_csr *rxcsr = priv->rx_dma_csr;
/* Reset Rx mSGDMA */
iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
@@ -138,8 +136,7 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
u32 ready = 0;
u32 inuse;
u32 status;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
+ struct msgdma_csr *txcsr = priv->tx_dma_csr;
/* Get number of sent descriptors */
inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
@@ -190,10 +187,8 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
u32 rxstatus = 0;
u32 pktlength;
u32 pktstatus;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
- struct msgdma_response *rxresp =
- (struct msgdma_response *)priv->rx_dma_resp;
+ struct msgdma_csr *rxcsr = priv->rx_dma_csr;
+ struct msgdma_response *rxresp = priv->rx_dma_resp;
if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
pktlength = ioread32(&rxresp->bytes_transferred);
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..dbd40e15b5cc 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -126,12 +126,12 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
*/
void sgdma_reset(struct altera_tse_private *priv)
{
- u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+ u32 *ptxdescripmem = priv->tx_dma_desc;
u32 txdescriplen = priv->txdescmem;
- u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+ u32 *prxdescripmem = priv->rx_dma_desc;
u32 rxdescriplen = priv->rxdescmem;
- struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
- struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
+ struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
/* Initialize descriptor memory to 0 */
memset(ptxdescripmem, 0, txdescriplen);
@@ -167,13 +167,13 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_csr *csr = priv->rx_dma_csr;
tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *csr = priv->tx_dma_csr;
tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}
@@ -185,8 +185,7 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
int pktstx = 0;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->tx_dma_desc;
+ struct sgdma_descrip *descbase = priv->tx_dma_desc;
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
@@ -219,7 +218,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
u32 ready = 0;
- struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+ struct sgdma_descrip *desc = priv->tx_dma_desc;
if (!sgdma_txbusy(priv) &&
((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
@@ -246,8 +245,8 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
*/
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+ struct sgdma_csr *csr = priv->rx_dma_csr;
+ struct sgdma_descrip *base = priv->rx_dma_desc;
struct sgdma_descrip *desc = NULL;
int pktsrx;
unsigned int rxstatus = 0;
@@ -351,10 +350,8 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
*/
static int sgdma_async_read(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->rx_dma_desc;
-
+ struct sgdma_csr *csr = priv->rx_dma_csr;
+ struct sgdma_descrip *descbase = priv->rx_dma_desc;
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
@@ -397,7 +394,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
static int sgdma_async_write(struct altera_tse_private *priv,
struct sgdma_descrip *desc)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *csr = priv->tx_dma_csr;
if (sgdma_txbusy(priv))
return 0;
@@ -518,7 +515,7 @@ queue_rx_peekhead(struct altera_tse_private *priv)
*/
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_csr *csr = priv->rx_dma_csr;
return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
}
@@ -528,7 +525,7 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *csr = priv->tx_dma_csr;
/* if DMA is busy, wait for the current transaction to finish */
while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
When using this driver on a BCM4706 it is also required to enable
BCMA_DRIVER_GMAC_CMN to make it work.
+config SYSTEMPORT
+ tristate "Broadcom SYSTEMPORT internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if SYSTEMPORT=y
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipsets that use an internal
+ Ethernet switch.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..8db34d389675 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
};
-#define BCM_ENET_STATS_LEN \
- (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
static const u32 unused_mib_regs[] = {
ETH_MIB_TX_ALL_OCTETS,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..4dc8d1e9829b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1614 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = __raw_readl(priv->base + offset + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
+/* L2-interrupt masking/unmasking helpers that automatically save the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
+ */
+#define BCM_SYSPORT_INTR_L2(which) \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
+} \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
+ priv->irq##which##_mask |= (mask); \
+} \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
+
+/* Register accesses to GISB/RBUS registers are expensive (a few hundred
+ * nanoseconds), so keep the check for 64-bit addressing explicit here to save
+ * one register write per packet on 32-bit platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+ d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+ __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+ struct dma_desc *desc,
+ unsigned int port)
+{
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+ tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ if (priv->rx_csum_en)
+ reg |= RXCHK_EN;
+ else
+ reg &= ~RXCHK_EN;
+
+ /* If UniMAC forwards the CRC, we need to skip over it so that
+ * a valid CHK bit is set in the per-packet status word
+ */
+ if (priv->rx_csum_en && priv->crc_fwd)
+ reg |= RXCHK_SKIP_FCS;
+ else
+ reg &= ~RXCHK_SKIP_FCS;
+
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Hardware transmit checksum requires us to enable the Transmit status
+ * block prepended to the packet contents
+ */
+ priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (priv->tsb_en)
+ reg |= TSB_EN;
+ else
+ reg &= ~TSB_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & NETIF_F_RXCSUM)
+ ret = bcm_sysport_set_rx_csum(dev, wanted);
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+ return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_MIB_RX("rx_control", mib.rx.cf),
+ STAT_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_MIB_RX("rx_align", mib.rx.aln),
+ STAT_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_MIB_RX("rx_code", mib.rx.cde),
+ STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_MIB_TX("tx_control", mib.tx.cf),
+ STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* RXCHK misc statistics */
+ STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+ STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+ RXCHK_OTHER_DISC_CNTR),
+ /* RBUF misc statistics */
+ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+ STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, "0.1", sizeof(info->version));
+ strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_SYSPORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_sysport_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ switch (s->type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ continue;
+ case BCM_SYSPORT_STAT_MIB_RX:
+ case BCM_SYSPORT_STAT_MIB_TX:
+ case BCM_SYSPORT_STAT_RUNT:
+ if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+ offset = UMAC_MIB_STAT_OFFSET;
+ val = umac_readl(priv, UMAC_MIB_START + j + offset);
+ break;
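+ /* The RXCHK/RBUF counters below presumably saturate at ~0;
+ * writing zero resets them after a saturated read.
+ */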
+ case BCM_SYSPORT_STAT_RXCHK:
+ val = rxchk_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rxchk_writel(priv, 0, s->reg_offset);
+ break;
+ case BCM_SYSPORT_STAT_RBUF:
+ val = rbuf_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rbuf_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+
+ netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcm_sysport_update_mib_counters(priv);
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ if (s->type == BCM_SYSPORT_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ dma_addr_t mapping;
+ int ret;
+
+ cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!cb->skb) {
+ netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(kdev, cb->skb->data,
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcm_sysport_free_cb(cb);
+ netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
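+ /* Advance the assign index; num_rx_bds is assumed to be a power
+ * of two, so the mask below implements the ring wrap-around.
+ */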
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DESC_SIZE);
+
+ netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+ return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+ unsigned int budget)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ unsigned int processed = 0, to_process;
+ struct bcm_sysport_cb *cb;
+ struct sk_buff *skb;
+ unsigned int p_index;
+ u16 len, status;
+ struct rsb *rsb;
+
+ /* Determine how much we should process since last call */
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ p_index &= RDMA_PROD_INDEX_MASK;
+
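+ /* Both indices are free-running hardware counters; handle the case
+ * where the producer index has wrapped past our consumer index.
+ */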
+ if (p_index < priv->rx_c_index)
+ to_process = (RDMA_CONS_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ to_process = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p_index=%d rx_c_index=%d to_process=%d\n",
+ p_index, priv->rx_c_index, to_process);
+
+ while ((processed < to_process) &&
+ (processed < budget)) {
+
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+
+ /* Extract the Receive Status Block prepended */
+ rsb = (struct rsb *)skb->data;
+ len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+ status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+ DESC_STATUS_MASK;
+
+ processed++;
+ priv->rx_read_ptr++;
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+ p_index, priv->rx_c_index, priv->rx_read_ptr,
+ len, status);
+
+ if (unlikely(!skb)) {
+ netif_err(priv, rx_err, ndev, "out of memory!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+ netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+ netif_err(priv, rx_err, ndev, "error packet\n");
+ if (status & RX_STATUS_OVFLOW)
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ skb_put(skb, len);
+
+ /* Hardware validated our checksum */
+ if (likely(status & DESC_L4_CSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* The hardware prepends packets with 2 bytes of padding between the
+ * Ethernet and IP headers, plus the Receive Status Block; strip
+ * all of this from the SKB.
+ */
+ skb_pull(skb, sizeof(*rsb) + 2);
+ len -= (sizeof(*rsb) + 2);
+
+ /* UniMAC may forward CRC */
+ if (priv->crc_fwd) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ napi_gro_receive(&priv->napi, skb);
+refill:
+ bcm_sysport_rx_refill(priv, cb);
+ }
+
+ return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb,
+ unsigned int *bytes_compl,
+ unsigned int *pkts_compl)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+
+ if (cb->skb) {
+ ndev->stats.tx_bytes += cb->skb->len;
+ *bytes_compl += cb->skb->len;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ ndev->stats.tx_packets++;
+ (*pkts_compl)++;
+ bcm_sysport_free_cb(cb);
+ /* SKB fragment */
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ struct net_device *ndev = priv->netdev;
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ u32 hw_ind;
+
+ txq = netdev_get_tx_queue(ndev, ring->index);
+
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+ ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+ last_c_index = ring->c_index;
+ num_tx_cbs = ring->size;
+
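+ /* The ring size is assumed to be a power of two; mask the hardware
+ * consumer index into our range and account for wrap-around when
+ * computing the number of newly completed descriptors.
+ */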
+ c_index &= (num_tx_cbs - 1);
+
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+ ring->index, c_index, last_tx_cn, last_c_index);
+
+ while (last_tx_cn-- > 0) {
+ cb = ring->cbs + last_c_index;
+ bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+ last_c_index++;
+ last_c_index &= (num_tx_cbs - 1);
+ }
+
+ ring->c_index = c_index;
+
+ if (netif_tx_queue_stopped(txq) && pkts_compl)
+ netif_tx_wake_queue(txq);
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+ return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned int released;
+
+ spin_lock(&ring->lock);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock(&ring->lock);
+
+ return released;
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_tx_ring *ring =
+ container_of(napi, struct bcm_sysport_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable TX interrupt */
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ }
+
+ return work_done;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++)
+ bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_priv *priv =
+ container_of(napi, struct bcm_sysport_priv, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_desc_rx(priv, budget);
+
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable RX interrupts */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+ }
+
+ return work_done;
+}
+
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq0_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious RX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ /* disable RX interrupts */
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* TX ring is full, perform a full reclaim since we do not know
+ * which one would trigger this interrupt
+ */
+ if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+ bcm_sysport_tx_reclaim_all(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring;
+
+ priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq1_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious TX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ if (!(priv->irq1_stat & BIT(ring)))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_1_mask_set(priv, BIT(ring));
+ __napi_schedule(&txr->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff *nskb;
+ struct tsb *tsb;
+ u32 csum_info;
+ u8 ip_proto;
+ u16 csum_start;
+ u16 ip_ver;
+
+ /* Re-allocate SKB if needed */
+ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+ nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+ dev_kfree_skb(skb);
+ if (!nskb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ skb = nskb;
+ }
+
+ tsb = (struct tsb *)skb_push(skb, sizeof(*tsb));
+ /* Zero-out TSB by default */
+ memset(tsb, 0, sizeof(*tsb));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
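+ /* skb->protocol is in network byte order; convert it for the
+ * comparison against the host-order ETH_P_* values below.
+ */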
+ ip_ver = htons(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Get the checksum offset and the L4 (transport) offset */
+ csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+ csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+ csum_info |= (csum_start << L4_PTR_SHIFT);
+
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ csum_info |= L4_LENGTH_VALID;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ csum_info |= L4_UDP;
+ } else
+ csum_info = 0;
+
+ tsb->l4_ptr_dest_map = csum_info;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ struct dma_desc *desc;
+ dma_addr_t mapping;
+ u32 len_status;
+ u16 queue;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue);
+ ring = &priv->tx_rings[queue];
+
+ /* lock against tx reclaim in BH context */
+ spin_lock(&ring->lock);
+ if (unlikely(ring->desc_count == 0)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "queue %d awake and ring full!\n", queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Insert TSB and checksum infos */
+ if (priv->tsb_en) {
+ ret = bcm_sysport_insert_tsb(skb, dev);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ mapping = dma_map_single(kdev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb->len);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Remember the SKB for future freeing */
+ cb = &ring->cbs[ring->curr_desc];
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, skb->len);
+
+ /* Fetch a descriptor entry from our pool */
+ desc = ring->desc_cpu;
+
+ desc->addr_lo = lower_32_bits(mapping);
+ len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+ len_status |= (skb->len << DESC_LEN_SHIFT);
+ len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+ DESC_STATUS_SHIFT;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+ ring->curr_desc++;
+ if (ring->curr_desc == ring->size)
+ ring->curr_desc = 0;
+ ring->desc_count--;
+
+ /* Ensure write completion of the descriptor status/length
+ * in DRAM before the System Port WRITE_PORT register latches
+ * the value
+ */
+ wmb();
+ desc->addr_status_len = len_status;
+ wmb();
+
+ /* Write this descriptor address to the RING write port */
+ tdma_port_write_desc_addr(priv, desc, ring->index);
+
+ /* Check ring space and update SW control flow */
+ if (ring->desc_count == 0)
+ netif_tx_stop_queue(txq);
+
+ netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+ ring->index, ring->desc_count, ring->curr_desc);
+
+ ret = NETDEV_TX_OK;
+out:
+ spin_unlock(&ring->lock);
+ return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+ netdev_warn(dev, "transmit timeout!\n");
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned int changed = 0;
+ u32 cmd_bits = 0, reg;
+
+ if (priv->old_link != phydev->link) {
+ changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+ CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ if (changed)
+ phy_print_status(priv->phydev);
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ size_t size;
+ void *p;
+ u32 reg;
+
+ /* Simple descriptor partitioning for now */
+ size = 256;
+
+ /* We just need one DMA descriptor which is DMA-able, since writing to
+ * the port will allocate a new descriptor in its internal linked-list
+ */
+ p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+ if (!p) {
+ netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+ if (!ring->cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize SW view of the ring */
+ spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+ ring->curr_desc = 0;
+
+ /* Initialize HW ring */
+ tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+ tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+ tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+ /* Program the number of descriptors as MAX_THRESHOLD and half of
+ * its size for the hysteresis trigger
+ */
+ tdma_writel(priv, ring->size |
+ 1 << RING_HYST_THRESH_SHIFT,
+ TDMA_DESC_RING_MAX_HYST(index));
+
+ /* Enable the ring queue in the arbiter */
+ reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+ reg |= (1 << index);
+ tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+ napi_enable(&ring->napi);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "TDMA cfg, size=%d, desc_cpu=%p\n",
+ ring->size, ring->desc_cpu);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ u32 reg;
+
+ /* Caller should stop the TDMA engine */
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!(reg & TDMA_DISABLED))
+ netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ bcm_sysport_tx_reclaim(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+
+ if (ring->desc_dma) {
+ dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+ ring->desc_dma = 0;
+ }
+ ring->size = 0;
+ ring->alloc_size = 0;
+
+ netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = rdma_readl(priv, RDMA_CONTROL);
+ if (enable)
+ reg |= RDMA_EN;
+ else
+ reg &= ~RDMA_EN;
+ rdma_writel(priv, reg, RDMA_CONTROL);
+
+ /* Poll for RDMA disabling completion */
+ do {
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!!(reg & RDMA_DISABLED) == !enable)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (enable)
+ reg |= TDMA_EN;
+ else
+ reg &= ~TDMA_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ /* Poll for TDMA disabling completion */
+ do {
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!!(reg & TDMA_DISABLED) == !enable)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+ u32 reg;
+ int ret;
+
+ /* Initialize SW view of the RX ring */
+ priv->num_rx_bds = NUM_RX_DESC;
+ priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds *
+ sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!priv->rx_cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = bcm_sysport_alloc_rx_bufs(priv);
+ if (ret) {
+ netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+ return ret;
+ }
+
+ /* Initialize HW, ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ rdma_enable_set(priv, 0);
+
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+ rdma_writel(priv, 0, RDMA_PROD_INDEX);
+ rdma_writel(priv, 0, RDMA_CONS_INDEX);
+ rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+ RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+ /* Operate the queue in ring mode */
+ rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+ rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+ rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+ rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+ rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+ priv->num_rx_bds, priv->rx_bds);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ unsigned int i;
+ u32 reg;
+
+ /* Caller should ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+ if (dma_unmap_addr(cb, dma_addr))
+ dma_unmap_single(&priv->pdev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ bcm_sysport_free_cb(cb);
+ }
+
+ kfree(priv->rx_cbs);
+ priv->rx_cbs = NULL;
+
+ netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC)
+ reg |= CMD_PROMISC;
+ else
+ reg &= ~CMD_PROMISC;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* No support for ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= CMD_RX_EN | CMD_TX_EN;
+ else
+ reg &= ~(CMD_RX_EN | CMD_TX_EN);
+ umac_writel(priv, reg, UMAC_CMD);
+}
+
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+ unsigned int timeout = 0;
+ u32 reg;
+ int ret = 0;
+
+ umac_writel(priv, 0, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == 1000) {
+ dev_err(&priv->pdev->dev,
+ "timeout waiting for MAC to come out of reset\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+ unsigned char *addr)
+{
+ umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
+ topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+ topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+ mdelay(1);
+ topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+ topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* Reset UniMAC */
+ ret = umac_reset(priv);
+ if (ret) {
+ netdev_err(dev, "UniMAC reset failed\n");
+ return ret;
+ }
+
+ /* Flush TX and RX FIFOs at TOPCTRL level */
+ topctrl_flush(priv);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, 0);
+
+ /* Enable RBUF 2-byte alignment and the Receive Status Block */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Set maximum frame length */
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* Set MAC address */
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ /* Read CRC forward */
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+ priv->phydev = of_phy_connect_fixed_link(dev, bcm_sysport_adj_link,
+ priv->phy_interface);
+ if (!priv->phydev) {
+ netdev_err(dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Reset housekeeping link status */
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+ priv->old_pause = -1;
+
+ /* mask all interrupts and request them */
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request RX interrupt\n");
+ goto out_phy_disconnect;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
+
+ /* Initialize both hardware and software ring */
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ret = bcm_sysport_init_tx_ring(priv, i);
+ if (ret) {
+ netdev_err(dev, "failed to initialize TX ring %d\n",
+ i);
+ goto out_free_tx_ring;
+ }
+ }
+
+ /* Initialize linked-list */
+ tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+ /* Initialize RX ring */
+ ret = bcm_sysport_init_rx_ring(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize RX ring\n");
+ goto out_free_rx_ring;
+ }
+
+ /* Turn on RDMA */
+ ret = rdma_enable_set(priv, 1);
+ if (ret)
+ goto out_free_rx_ring;
+
+ /* Enable RX interrupt and TX ring full interrupt */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+ /* Turn on TDMA */
+ ret = tdma_enable_set(priv, 1);
+ if (ret)
+ goto out_clear_rx_int;
+
+ /* Enable NAPI */
+ napi_enable(&priv->napi);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(priv, 1);
+
+ phy_start(priv->phydev);
+
+ /* Enable TX interrupts for the 32 TXQs */
+ intrl2_1_mask_clear(priv, 0xffffffff);
+
+ /* Last call before we start the real business */
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+out_clear_rx_int:
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+ bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ free_irq(priv->irq1, dev);
+out_free_irq0:
+ free_irq(priv->irq0, dev);
+out_phy_disconnect:
+ phy_disconnect(priv->phydev);
+ return ret;
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* stop all software from updating hardware */
+ netif_tx_stop_all_queues(dev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ /* mask all interrupts */
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ /* Disable UniMAC RX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ ret = tdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling TDMA\n");
+ return ret;
+ }
+
+ /* Wait for a maximum packet size to be drained */
+ usleep_range(2000, 3000);
+
+ ret = rdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling RDMA\n");
+ return ret;
+ }
+
+ /* Disable UniMAC TX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* Free RX/TX rings SW structures */
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ bcm_sysport_fini_rx_ring(priv);
+
+ free_irq(priv->irq0, dev);
+ free_irq(priv->irq1, dev);
+
+ /* Disconnect from PHY */
+ phy_disconnect(priv->phydev);
+
+ return 0;
+}
+
+static struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .get_settings = bcm_sysport_get_settings,
+ .set_settings = bcm_sysport_set_settings,
+ .get_drvinfo = bcm_sysport_get_drvinfo,
+ .get_msglevel = bcm_sysport_get_msglvl,
+ .set_msglevel = bcm_sysport_set_msglvl,
+ .get_link = ethtool_op_get_link,
+ .get_strings = bcm_sysport_get_strings,
+ .get_ethtool_stats = bcm_sysport_get_stats,
+ .get_sset_count = bcm_sysport_get_sset_count,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+ .ndo_start_xmit = bcm_sysport_xmit,
+ .ndo_tx_timeout = bcm_sysport_tx_timeout,
+ .ndo_open = bcm_sysport_open,
+ .ndo_stop = bcm_sysport_stop,
+ .ndo_set_features = bcm_sysport_set_features,
+ .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT "v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+ struct bcm_sysport_priv *priv;
+ struct device_node *dn;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ u32 txq, rxq;
+ int ret;
+
+ dn = pdev->dev.of_node;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Read the Transmit/Receive Queue properties */
+ if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+ txq = TDMA_NUM_RINGS;
+ if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+ rxq = 1;
+
+ dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+ if (!dev)
+ return -ENOMEM;
+
+ /* Initialize private members */
+ priv = netdev_priv(dev);
+
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ dev_err(&pdev->dev, "invalid interrupts\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "register remap failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ priv->netdev = dev;
+ priv->pdev = pdev;
+
+ priv->phy_interface = of_get_phy_mode(dn);
+ /* Default to GMII interface mode */
+ if (priv->phy_interface < 0)
+ priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ /* Initialize netdevice members */
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ random_ether_addr(dev->dev_addr);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_ETHTOOL_OPS(dev, &bcm_sysport_ethtool_ops);
+ dev->netdev_ops = &bcm_sysport_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+ /* HW supported features, none enabled by default */
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct tsb) != 8);
+ dev->needed_headroom += sizeof(struct tsb);
+
+ /* We are interfaced to a switch which handles the multicast
+ * filtering for us, so we do not support programming any
+ * multicast hash table in this Ethernet MAC.
+ */
+ dev->flags &= ~IFF_MULTICAST;
+
+ ret = register_netdev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register net_device\n");
+ goto err;
+ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+ "Broadcom SYSTEMPORT" REV_FMT
+ " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+ priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+ return 0;
+err:
+ free_netdev(dev);
+ return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+ /* Not much to do, ndo_close has been called
+ * and we use managed allocations
+ */
+ unregister_netdev(dev);
+ free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemport-v1.00" },
+ { .compatible = "brcm,systemport" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm_sysport_driver = {
+ .probe = bcm_sysport_probe,
+ .remove = bcm_sysport_remove,
+ .driver = {
+ .name = "brcm-systemport",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_sysport_of_match,
+ },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..a0441e7c83cd
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,677 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN 0x00
+#define DESC_ADDR_HI_SHIFT 0
+#define DESC_ADDR_HI_MASK 0xff
+#define DESC_STATUS_SHIFT 8
+#define DESC_STATUS_MASK 0x3ff
+#define DESC_LEN_SHIFT 18
+#define DESC_LEN_MASK 0x7fff
+#define DESC_ADDR_LO 0x04
+
+/* HW supports 40-bit addressing, hence each descriptor is made of two 32-bit
+ * words: one packing the upper 8 address bits with status and length, and one
+ * holding the lower 32 address bits
+ */
+#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
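Editorial note: as a quick illustration of the bit layout defined above, here is a minimal sketch (not part of the patch) of how the first descriptor word would be unpacked with these shifts and masks; the function name is illustrative only.

#include <stdint.h>

/* Sketch only: extract the fields of the first descriptor word using the
 * DESC_* shifts and masks defined above.
 */
static void desc_word0_unpack(uint32_t w, uint32_t *addr_hi,
			      uint32_t *status, uint32_t *len)
{
	*addr_hi = (w >> 0) & 0xff;	/* DESC_ADDR_HI_SHIFT/MASK */
	*status  = (w >> 8) & 0x3ff;	/* DESC_STATUS_SHIFT/MASK */
	*len     = (w >> 18) & 0x7fff;	/* DESC_LEN_SHIFT/MASK */
}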
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH 2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
+ * adding ENET_PAD(8) rounds this up to 1536, a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN 6
+#define ENET_PAD 8
+#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
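Editorial note: a minimal, standalone check (not part of the patch) of the arithmetic in the comment above, assuming the usual values ETH_DATA_LEN=1500, ETH_HLEN=14, VLAN_HLEN=4 and ETH_FCS_LEN=4.

#include <assert.h>

int main(void)
{
	/* ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + ENET_BRCM_TAG_LEN +
	 * ETH_FCS_LEN + ENET_PAD
	 */
	int max_mtu = 1500 + 14 + 4 + 6 + 4 + 8;

	assert(max_mtu == 1536);
	assert(max_mtu % 256 == 0);
	return 0;
}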
+
+/* Transmit status block */
+struct tsb {
+ u32 pcp_dei_vid;
+#define PCP_DEI_MASK 0xf
+#define VID_SHIFT 4
+#define VID_MASK 0xfff
+ u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK 0x1ff
+#define L4_PTR_SHIFT 9
+#define L4_PTR_MASK 0x1ff
+#define L4_UDP (1 << 18)
+#define L4_LENGTH_VALID (1 << 19)
+#define DEST_MAP_SHIFT 20
+#define DEST_MAP_MASK 0x1ff
+};
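Editorial note: a sketch (not part of the patch) of how the second TSB word would be packed from the bit definitions above. The semantics of each field (byte versus half-word offsets, when L4_UDP or L4_LENGTH_VALID are set) are decided by the TSB insertion code in bcmsysport.c, so this only illustrates the layout.

#include <stdint.h>

static uint32_t tsb_l4_word_pack(uint32_t csum_ptr, uint32_t l4_ptr,
				 uint32_t flags, uint32_t dest_map)
{
	return (csum_ptr & 0x1ff) |		/* L4_CSUM_PTR_MASK */
	       ((l4_ptr & 0x1ff) << 9) |	/* L4_PTR_SHIFT/MASK */
	       flags |				/* L4_UDP and/or L4_LENGTH_VALID */
	       ((dest_map & 0x1ff) << 20);	/* DEST_MAP_SHIFT/MASK */
}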
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct rsb {
+ u32 rx_status_len;
+ u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM (1 << 7)
+#define DESC_SOP (1 << 8)
+#define DESC_EOP (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST 0
+#define RX_STATUS_BCAST 0x04
+#define RX_STATUS_MCAST 0x08
+#define RX_STATUS_L2_MCAST 0x0c
+#define RX_STATUS_ERR (1 << 4)
+#define RX_STATUS_OVFLOW (1 << 5)
+#define RX_STATUS_PARSE_FAIL (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT 0x00
+#define TX_STATUS_VLAN_PCP_TSB 0x01
+#define TX_STATUS_VLAN_QUEUE 0x02
+#define TX_STATUS_VLAN_VID_TSB 0x03
+#define TX_STATUS_OWR_CRC (1 << 2)
+#define TX_STATUS_APP_CRC (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT 0
+#define TX_STATUS_BRCM_TAG_ZERO 0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
+#define TX_STATUS_SKIP_BYTES (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET 0
+#define REV_CNTL 0x00
+#define REV_MASK 0xffff
+
+#define RX_FLUSH_CNTL 0x04
+#define RX_FLUSH (1 << 0)
+
+#define TX_FLUSH_CNTL 0x08
+#define TX_FLUSH (1 << 0)
+
+#define MISC_CNTL 0x0c
+#define SYS_CLK_SEL (1 << 0)
+#define TDMA_EOP_SEL (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET 0x200
+#define SYS_PORT_INTRL2_1_OFFSET 0x240
+#define INTRL2_CPU_STATUS 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0c
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
+#define INTRL2_0_MPD (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
+#define INTRL2_0_RDMA_MBDONE (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
+#define INTRL2_0_TX_RING_FULL (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET 0x300
+
+#define RXCHK_CONTROL 0x00
+#define RXCHK_EN (1 << 0)
+#define RXCHK_SKIP_FCS (1 << 1)
+#define RXCHK_BAD_CSUM_DIS (1 << 2)
+#define RXCHK_BRCM_TAG_EN (1 << 3)
+#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
+#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
+#define RXCHK_PARSE_TNL (1 << 12)
+#define RXCHK_VIOL_EN (1 << 13)
+#define RXCHK_VIOL_DIS (1 << 14)
+#define RXCHK_INCOM_PKT (1 << 15)
+#define RXCHK_V6_DUPEXT_EN (1 << 16)
+#define RXCHK_V6_DUPEXT_DIS (1 << 17)
+#define RXCHK_ETHERTYPE_DIS (1 << 18)
+#define RXCHK_L2_HDR_DIS (1 << 19)
+#define RXCHK_L3_HDR_DIS (1 << 20)
+#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
+#define RXCHK_PARSE_AUTH (1 << 22)
+
+#define RXCHK_BRCM_TAG0 0x04
+#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK 0x24
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
+#define RXCHK_ETHERTYPE 0x48
+#define RXCHK_BAD_CSUM_CNTR 0x4C
+#define RXCHK_OTHER_DISC_CNTR 0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET 0x380
+#define TXCHK_PKT_RDY_THRESH 0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET 0x400
+
+#define RBUF_CONTROL 0x00
+#define RBUF_RSB_EN (1 << 0)
+#define RBUF_4B_ALGN (1 << 1)
+#define RBUF_BRCM_TAG_STRIP (1 << 2)
+#define RBUF_BAD_PKT_DISC (1 << 3)
+#define RBUF_RESUME_THRESH_SHIFT 4
+#define RBUF_RESUME_THRESH_MASK 0xff
+#define RBUF_OK_TO_SEND_SHIFT 12
+#define RBUF_OK_TO_SEND_MASK 0xff
+#define RBUF_CRC_REPLACE (1 << 20)
+#define RBUF_OK_TO_SEND_MODE (1 << 21)
+#define RBUF_RSB_SWAP (1 << 22)
+#define RBUF_ACPI_EN (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH 0x04
+
+#define RBUF_STATUS 0x08
+#define RBUF_WOL_MODE (1 << 0)
+#define RBUF_MPD (1 << 1)
+#define RBUF_ACPI (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR 0x0c
+#define RBUF_ERR_PKT_CNTR 0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET 0x600
+
+#define TBUF_CONTROL 0x00
+#define TBUF_BP_EN (1 << 0)
+#define TBUF_MAX_PKT_THRESH_SHIFT 1
+#define TBUF_MAX_PKT_THRESH_MASK 0x1f
+#define TBUF_FULL_THRESH_SHIFT 8
+#define TBUF_FULL_THRESH_MASK 0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET 0x800
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+/* There is a 0xC byte gap between the end of the RX stats and the beginning of
+ * the TX stats, and another between the end of the TX stats and the RX RUNT stats
+ */
+#define UMAC_MIB_STAT_OFFSET 0xc
+
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RX_CNT_RST (1 << 0)
+#define MIB_RUNT_CNT_RST (1 << 1)
+#define MIB_TX_CNT_RST (1 << 2)
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET 0x2000
+
+#define RDMA_CONTROL 0x1000
+#define RDMA_EN (1 << 0)
+#define RDMA_RING_CFG (1 << 1)
+#define RDMA_DISC_EN (1 << 2)
+#define RDMA_BUF_DATA_OFFSET_SHIFT 4
+#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
+
+#define RDMA_STATUS 0x1004
+#define RDMA_DISABLED (1 << 0)
+#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
+#define RDMA_BP_STATUS (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE 0x1008
+
+#define RDMA_RING_BUF_SIZE 0x100c
+#define RDMA_RING_SIZE_SHIFT 16
+
+#define RDMA_WRITE_PTR_HI 0x1010
+#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_PROD_INDEX 0x1018
+#define RDMA_PROD_INDEX_MASK 0xffff
+
+#define RDMA_CONS_INDEX 0x101c
+#define RDMA_CONS_INDEX_MASK 0xffff
+
+#define RDMA_START_ADDR_HI 0x1020
+#define RDMA_START_ADDR_LO 0x1024
+#define RDMA_END_ADDR_HI 0x1028
+#define RDMA_END_ADDR_LO 0x102c
+
+#define RDMA_MBDONE_INTR 0x1030
+#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_TIMEOUT_SHIFT 16
+#define RDMA_TIMEOUT_MASK 0xffff
+
+#define RDMA_XON_XOFF_THRESH 0x1034
+#define RDMA_XON_XOFF_THRESH_MASK 0xffff
+#define RDMA_XOFF_THRESH_SHIFT 16
+
+#define RDMA_READ_PTR_HI 0x1038
+#define RDMA_READ_PTR_LO 0x103c
+
+#define RDMA_OVERRIDE 0x1040
+#define RDMA_LE_MODE (1 << 0)
+#define RDMA_REG_MODE (1 << 1)
+
+#define RDMA_TEST 0x1044
+#define RDMA_TP_OUT_SEL (1 << 0)
+#define RDMA_MEM_SEL (1 << 1)
+
+#define RDMA_DEBUG 0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS 32 /* rings = queues */
+#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bit words */
+
+#define SYS_PORT_TDMA_OFFSET 0x4000
+#define TDMA_WRITE_PORT_OFFSET 0x0000
+#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
+ (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
+ (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relative to a specific ring number */
+#define RING_HEAD_TAIL_PTR 0x00
+#define RING_HEAD_MASK 0x7ff
+#define RING_TAIL_SHIFT 11
+#define RING_TAIL_MASK 0x7ff
+#define RING_FLUSH (1 << 24)
+#define RING_EN (1 << 25)
+
+#define RING_COUNT 0x04
+#define RING_COUNT_MASK 0x7ff
+#define RING_BUFF_DONE_SHIFT 11
+#define RING_BUFF_DONE_MASK 0x7ff
+
+#define RING_MAX_HYST 0x08
+#define RING_MAX_THRESH_MASK 0x7ff
+#define RING_HYST_THRESH_SHIFT 11
+#define RING_HYST_THRESH_MASK 0x7ff
+
+#define RING_INTR_CONTROL 0x0c
+#define RING_INTR_THRESH_MASK 0x7ff
+#define RING_EMPTY_INTR_EN (1 << 15)
+#define RING_TIMEOUT_SHIFT 16
+#define RING_TIMEOUT_MASK 0xffff
+
+#define RING_PROD_CONS_INDEX 0x10
+#define RING_PROD_INDEX_MASK 0xffff
+#define RING_CONS_INDEX_SHIFT 16
+#define RING_CONS_INDEX_MASK 0xffff
+
+#define RING_MAPPING 0x14
+#define RING_QID_MASK 0x3
+#define RING_PORT_ID_SHIFT 3
+#define RING_PORT_ID_MASK 0x7
+#define RING_IGNORE_STATUS (1 << 6)
+#define RING_FAILOVER_EN (1 << 7)
+#define RING_CREDIT_SHIFT 8
+#define RING_CREDIT_MASK 0xffff
+
+#define RING_PCP_DEI_VID 0x18
+#define RING_VID_MASK 0x7ff
+#define RING_DEI (1 << 12)
+#define RING_PCP_SHIFT 13
+#define RING_PCP_MASK 0x7
+#define RING_PKT_SIZE_ADJ_SHIFT 16
+#define RING_PKT_SIZE_ADJ_MASK 0xf
+
+#define TDMA_DESC_RING_SIZE 28
+
+/* Definition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
+ ((i) * TDMA_DESC_RING_SIZE))
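Editorial note: a worked expansion (not part of the patch) of the offsets above. The 32 write ports take 32 * 8 bytes from 0x0, the read ports another 32 * 8 bytes, and the read port commands 32 * 4 bytes, so ring 0's register block begins at 0x280 and the 32 blocks of 28 bytes end exactly where TDMA_CONTROL (0x600) starts; all offsets are relative to SYS_PORT_TDMA_OFFSET.

#include <assert.h>

int main(void)
{
	const unsigned int port_size = 2 * 4;			/* TDMA_PORT_SIZE */
	const unsigned int read_port = 32 * port_size;		/* TDMA_READ_PORT_OFFSET */
	const unsigned int read_cmd  = read_port + 32 * port_size;
	const unsigned int ring00    = read_cmd + 32 * 4;	/* TDMA_DESC_RING_00_BASE */

	assert(ring00 == 0x280);
	assert(ring00 + 32 * 28 == 0x600);	/* ring space abuts TDMA_CONTROL */
	return 0;
}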
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+ (TDMA_DESC_RING_BASE(i) + \
+ RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL 0x600
+#define TDMA_EN (1 << 0)
+#define TSB_EN (1 << 1)
+#define TSB_SWAP (1 << 2)
+#define ACB_ALGO (1 << 3)
+#define BUF_DATA_OFFSET_SHIFT 4
+#define BUF_DATA_OFFSET_MASK 0x3ff
+#define VLAN_EN (1 << 14)
+#define SW_BRCM_TAG (1 << 15)
+#define WNC_KPT_SIZE_UPDATE (1 << 16)
+#define SYNC_PKT_SIZE (1 << 17)
+#define ACH_TXDONE_DELAY_SHIFT 18
+#define ACH_TXDONE_DELAY_MASK 0xff
+
+#define TDMA_STATUS 0x604
+#define TDMA_DISABLED (1 << 0)
+#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE 0x608
+#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS 0x610
+#define TDMA_TPID 0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
+#define TDMA_FREE_HEAD_MASK 0x7ff
+#define TDMA_FREE_TAIL_SHIFT 11
+#define TDMA_FREE_TAIL_MASK 0x7ff
+
+#define TDMA_FREE_LIST_COUNT 0x61c
+#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
+
+#define TDMA_TIER2_ARB_CTRL 0x620
+#define TDMA_ARB_MODE_RR 0
+#define TDMA_ARB_MODE_WEIGHT_RR 0x1
+#define TDMA_ARB_MODE_STRICT 0x2
+#define TDMA_ARB_MODE_DEFICIT_RR 0x3
+#define TDMA_CREDIT_SHIFT 4
+#define TDMA_CREDIT_MASK 0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL 0x624
+#define TDMA_ARB_EN (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
+#define TDMA_TIER1_ARB_1_CTRL 0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
+#define TDMA_TIER1_ARB_2_CTRL 0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
+#define TDMA_TIER1_ARB_3_CTRL 0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
+#define TDMA_LE_MODE (1 << 0)
+#define TDMA_REG_MODE (1 << 1)
+
+#define TDMA_TEST 0x648
+#define TDMA_TP_OUT_SEL (1 << 0)
+#define TDMA_MEM_TM (1 << 1)
+
+#define TDMA_DEBUG 0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+ u32 addr_status_len;
+ u32 addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS 1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC 1536
+
+#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group */
+struct bcm_sysport_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
+ u32 cnt_127; /* RO Rx/Tx 127 bytes packet */
+ u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470), # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+ struct bcm_sysport_rx_counters rx;
+ struct bcm_sysport_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rxchk_bad_csum;
+ u32 rxchk_other_pkt_disc;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+ BCM_SYSPORT_STAT_NETDEV = -1,
+ BCM_SYSPORT_STAT_MIB_RX,
+ BCM_SYSPORT_STAT_MIB_TX,
+ BCM_SYSPORT_STAT_RUNT,
+ BCM_SYSPORT_STAT_RXCHK,
+ BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RXCHK, \
+ .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RBUF, \
+ .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcm_sysport_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
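Editorial note: for illustration only (not part of the patch), entries built with the STAT_* macros above might look like the following; the actual statistics table lives in bcmsysport.c earlier in this patch.

static const struct bcm_sysport_stats example_stats[] = {
	/* Plain netdev counter, read from struct net_device_stats */
	STAT_NETDEV(rx_packets),
	/* UniMAC MIB RX counter, located in priv->mib via offsetof() */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	/* RXCHK counter, mirrored from the RXCHK_BAD_CSUM_CNTR register */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
};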
+
+/* Software housekeeping helper structure */
+struct bcm_sysport_cb {
+ struct sk_buff *skb; /* SKB for RX packets */
+ void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+ spinlock_t lock; /* Ring lock for tx reclaim/xmit */
+ struct napi_struct napi; /* NAPI per tx queue */
+ dma_addr_t desc_dma; /* DMA cookie */
+ unsigned int index; /* Ring index */
+ unsigned int size; /* Ring current size */
+ unsigned int alloc_size; /* Ring one-time allocated size */
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+ unsigned int p_index; /* Current producer index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+ void __iomem *base;
+ u32 irq0_stat;
+ u32 irq0_mask;
+ u32 irq1_stat;
+ u32 irq1_mask;
+ struct napi_struct napi ____cacheline_aligned;
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ int irq0;
+ int irq1;
+
+ /* Transmit rings */
+ struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+ /* Receive queue */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ unsigned int rx_bd_assign_index;
+ struct bcm_sysport_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* PHY device */
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ /* Misc fields */
+ unsigned int rx_csum_en:1;
+ unsigned int tsb_en:1;
+ unsigned int crc_fwd:1;
+ u16 rev;
+
+ /* MIB related fields */
+ struct bcm_sysport_mib mib;
+
+ /* Ethtool */
+ u32 msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..8efeed3325b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2252,12 +2252,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
cmd->port = PORT_DA;
else
- cmd->port = PORT_FIBRE;
+ cmd->port = PORT_OTHER;
} else
cmd->port = PORT_OTHER;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ca95cf2954eb..cced1a3d5181 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
- (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9cfa4b4bb089..adebbf849cdb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt = (void *)rsp;
- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && !pkt->err_vec &&
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..31c376628bfd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define RSS_INDIR_TABLE_LEN 128
+#define RSS_HASH_KEY_LEN 40
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -409,6 +412,13 @@ struct be_resources {
u32 if_cap_flags;
};
+struct rss_info {
+ u64 rss_flags;
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+ u8 rss_queue[RSS_INDIR_TABLE_LEN];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -445,7 +455,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
- u8 vlan_tag[VLAN_N_VID];
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +517,7 @@ struct be_adapter {
u32 msg_enable;
int be_get_temp_freq;
u8 pf_number;
- u64 rss_flags;
+ struct rss_info rss_info;
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..476752d0a6a4 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
}
};
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
- u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -197,7 +196,7 @@ done:
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
- struct be_async_event_link_state *evt)
+ struct be_async_event_link_state *evt)
{
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
@@ -221,7 +220,9 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
- struct be_async_event_grp5_cos_priority *evt)
+ struct
+ be_async_event_grp5_cos_priority
+ *evt)
{
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
@@ -233,7 +234,9 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
- struct be_async_event_grp5_qos_link_speed *evt)
+ struct
+ be_async_event_grp5_qos_link_speed
+ *evt)
{
if (adapter->phy.link_speed >= 0 &&
evt->physical_port == adapter->port_num)
@@ -242,7 +245,9 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
- struct be_async_event_grp5_pvid_state *evt)
+ struct
+ be_async_event_grp5_pvid_state
+ *evt)
{
if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
@@ -253,7 +258,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *evt)
+ u32 trailer, struct be_mcc_compl *evt)
{
u8 event_type = 0;
@@ -281,7 +286,7 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
static void be_async_dbg_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *cmp)
+ u32 trailer, struct be_mcc_compl *cmp)
{
u8 event_type = 0;
struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
@@ -370,10 +375,10 @@ int be_process_mcc(struct be_adapter *adapter)
(struct be_async_event_link_state *) compl);
else if (is_grp5_evt(compl->flags))
be_async_grp5_evt_process(adapter,
- compl->flags, compl);
+ compl->flags, compl);
else if (is_dbg_evt(compl->flags))
be_async_dbg_evt_process(adapter,
- compl->flags, compl);
+ compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used);
@@ -560,10 +565,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
- sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +633,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
- dev_info(dev, "Waiting for POST, %ds elapsed\n",
- timeout);
+ dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
@@ -649,8 +651,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
- unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
wrb->tag0 = addr & 0xFFFFFFFF;
wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +660,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len,
- struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+ u8 subsystem, u8 opcode, int cmd_len,
+ struct be_mcc_wrb *wrb,
+ struct be_dma_mem *mem)
{
struct be_sge *sge;
@@ -683,7 +685,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
- struct be_dma_mem *mem)
+ struct be_dma_mem *mem)
{
int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
u64 dma = (u64)mem->dma;
@@ -868,7 +870,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+ NULL);
/* Support for EQ_CREATEv2 available only SH-R onwards */
if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +920,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+ NULL);
req->type = MAC_ADDRESS_TYPE_NETWORK;
if (permanent) {
req->permanent = 1;
@@ -940,7 +944,7 @@ err:
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -956,7 +960,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1017,7 @@ err:
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
- struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req;
@@ -1028,17 +1033,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
- coalesce_wm);
+ coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
- ctxt, no_delay);
+ ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1059,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
- no_delay);
+ no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
- ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1092,8 @@ static u32 be_encoded_q_len(int q_len)
}
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1109,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
} else {
req->hdr.version = 1;
@@ -1145,8 +1150,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
}
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1167,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1193,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
}
int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq, struct be_queue_info *cq)
{
int status;
@@ -1213,7 +1218,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1250,8 +1255,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u32 if_id, u32 rss, u8 *rss_id)
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1273,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
req->cq_id = cpu_to_le16(cq_id);
req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1300,7 @@ err:
* Uses Mbox
*/
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int queue_type)
+ int queue_type)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1339,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
}
be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
- NULL);
+ NULL);
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1366,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1389,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+ sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1428,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+ sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
@@ -1452,7 +1459,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
hdr = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+ nonemb_cmd);
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
@@ -1472,7 +1480,7 @@ err:
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
@@ -1493,8 +1501,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
- nonemb_cmd);
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+ wrb, nonemb_cmd);
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1561,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req), wrb, NULL);
/* version 1 of the cmd is not supported only by BE2 */
if (!BE2_chip(adapter))
@@ -1598,8 +1607,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
- wrb, NULL);
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+ sizeof(*req), wrb, NULL);
be_mcc_notify(adapter);
@@ -1625,7 +1634,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+ NULL);
req->fat_operation = cpu_to_le32(QUERY_FAT);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1655,8 +1665,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
if (!get_fat_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -1679,8 +1689,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
- &get_fat_cmd);
+ OPCODE_COMMON_MANAGE_FAT, payload_len,
+ wrb, &get_fat_cmd);
req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1701,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!status) {
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
memcpy(buf + offset,
- resp->data_buffer,
- le32_to_cpu(resp->read_log_length));
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
} else {
dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
goto err;
@@ -1702,14 +1712,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
}
err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va,
- get_fat_cmd.dma);
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash)
+ char *fw_on_flash)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1735,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+ NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1769,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+ NULL);
req->num_eq = cpu_to_le32(num);
for (i = 0; i < num; i++) {
@@ -1777,7 +1788,7 @@ err:
/* Uses sycnhronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous)
+ u32 num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1804,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+ wrb, NULL);
req->interface_id = if_id;
- req->promiscuous = promiscuous;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req->num_vlan = num;
- if (!promiscuous) {
- memcpy(req->normal_vlan, vtag_array,
- req->num_vlan * sizeof(vtag_array[0]));
- }
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1827,18 +1835,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
memset(req, 0, sizeof(*req));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
- wrb, mem);
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+ wrb, mem);
req->if_id = cpu_to_le32(adapter->if_handle);
if (flags & IFF_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
if (value == ON)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1876,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
- req->if_flags_mask) {
+ req->if_flags_mask) {
dev_warn(&adapter->pdev->dev,
"Cannot set rx filter flags 0x%x\n",
req->if_flags_mask);
@@ -1905,7 +1914,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1948,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1968,7 +1979,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ sizeof(*req), wrb, NULL);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2011,7 +2023,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+ NULL);
status = be_mbox_notify_wait(adapter);
@@ -2020,47 +2033,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size)
+ u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
- 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
- 0x3ea83c02, 0x4a110304};
int status;
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(rss_hash_opts);
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- if (lancer_chip(adapter) || skyhawk_chip(adapter))
+ if (!BEx_chip(adapter))
req->hdr.version = 1;
memcpy(req->cpu_table, rsstable, table_size);
- memcpy(req->hash, myhash, sizeof(myhash));
+ memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
be_dws_cpu_to_le(req->hash, sizeof(req->hash));
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
- u8 bcn, u8 sts, u8 state)
+ u8 bcn, u8 sts, u8 state)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2089,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+ sizeof(*req), wrb, NULL);
req->port_num = port_num;
req->beacon_state = state;
@@ -2107,7 +2121,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+ wrb, NULL);
req->port_num = port_num;
@@ -2146,20 +2161,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_OBJECT,
- sizeof(struct lancer_cmd_req_write_object), wrb,
- NULL);
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object), wrb,
+ NULL);
ctxt = &req->context;
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- write_length, ctxt, data_size);
+ write_length, ctxt, data_size);
if (data_size == 0)
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 1);
+ eof, ctxt, 1);
else
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 0);
+ eof, ctxt, 0);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2182,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
- sizeof(struct lancer_cmd_req_write_object))
- & 0xFFFFFFFF);
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
@@ -2197,8 +2212,8 @@ err_unlock:
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status)
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2231,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_OBJECT,
- sizeof(struct lancer_cmd_req_read_object), wrb,
- NULL);
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
req->desired_read_len = cpu_to_le32(data_size);
req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2259,7 @@ err_unlock:
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2261,7 +2276,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+ cmd);
req->params.op_type = cpu_to_le32(flash_type);
req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2318,7 +2334,7 @@ err:
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2350,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+ wrb, nonemb_cmd);
memcpy(req->magic_mac, mac, ETH_ALEN);
status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2379,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
- NULL);
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+ wrb, NULL);
req->src_port = port_num;
req->dest_port = port_num;
@@ -2378,7 +2394,8 @@ err:
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2413,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+ NULL);
req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2439,7 @@ err:
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd)
+ u32 byte_cnt, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2455,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+ cmd);
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2484,7 @@ err:
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2500,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+ nonemb_cmd);
status = be_mcc_notify_wait(adapter);
@@ -2510,8 +2529,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2521,8 +2539,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+ wrb, &cmd);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -2544,8 +2562,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2568,7 +2585,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2614,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ &attribs_cmd.dma);
if (!attribs_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -2613,8 +2629,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
req = attribs_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
- &attribs_cmd);
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+ wrb, &attribs_cmd);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2649,7 +2665,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+ sizeof(*req), wrb, NULL);
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2779,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure during GET_MAC_LIST\n");
+ "Memory allocation failure during GET_MAC_LIST\n");
return -ENOMEM;
}
@@ -2831,18 +2848,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
/* If no active mac_id found, return first mac addr */
*pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
- ETH_ALEN);
+ ETH_ALEN);
}
out:
spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
- u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+ u8 *mac, u32 if_handle, bool active, u32 domain)
{
if (!active)
@@ -2892,7 +2909,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -2906,8 +2923,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
req->hdr.domain = domain;
req->mac_count = mac_count;
@@ -2917,8 +2934,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size,
- cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2963,7 +2979,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3026,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3045,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (!status) {
struct be_cmd_resp_get_hsw_config *resp =
embedded_payload(wrb);
- be_dws_le_to_cpu(&resp->context,
- sizeof(resp->context));
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
- pvid, &resp->context);
+ pvid, &resp->context);
if (pvid)
*pvid = le16_to_cpu(vid);
if (mode)
@@ -3062,11 +3079,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -3349,8 +3364,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3396,7 +3410,7 @@ err:
/* Uses mbox */
static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3438,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
/* Uses sync mcc */
static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3498,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le32_to_cpu(resp->desc_count);
- pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
- desc_count);
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
@@ -3859,7 +3873,7 @@ err:
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
- int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
struct be_adapter *adapter = netdev_priv(netdev_handle);
struct be_mcc_wrb *wrb;
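The be_cmds.c hunks visible above are indentation and line-wrapping cleanups: wrapped call arguments are re-indented so continuation lines align with the opening parenthesis, and short calls that had been split unnecessarily are rejoined onto one line. A minimal sketch of the style they converge on, taken from the be_cmd_set_qos hunk above:

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);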
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..228d4b611084 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2060,7 +2060,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
char *fw_on_flash);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous);
+ u32 num);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2068,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
int be_cmd_reset_function(struct be_adapter *adapter);
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
+ u32 rss_hash_opts, u16 table_size, u8 *rss_hkey);
int be_process_mcc(struct be_adapter *adapter);
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
u8 status, u8 state);
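Two prototypes change here: be_cmd_vlan_config() drops its unused promiscuous argument, and be_cmd_rss_config() grows a hash-key argument so callers can program the RSS hash key together with the indirection table. A rough sketch of the new call shape, mirroring how be_main.c in this same series uses it (the local key buffer and its fill are illustrative; RSS_HASH_KEY_LEN and RSS_INDIR_TABLE_LEN are constants this series references elsewhere):

	u8 hkey[RSS_HASH_KEY_LEN];

	/* pick a random RSS hash key, then hand key + table + flags to FW */
	get_random_bytes(hkey, RSS_HASH_KEY_LEN);
	status = be_cmd_rss_config(adapter, rsstable, rss_flags,
				   RSS_INDIR_TABLE_LEN, hkey);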
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..970ae337daac 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers
* to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_NO_LOOPBACK 0xff
static void be_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
drvinfo->eedump_len = 0;
}
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
u32 data_read = 0, eof;
u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
memset(&data_len_cmd, 0, sizeof(data_len_cmd));
/* data_offset and data_size should be 0 to get reg len */
status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
- file_name, &data_read, &eof, &addn_status);
+ file_name, &data_read, &eof,
+ &addn_status);
return data_read;
}
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
- u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
{
struct be_dma_mem read_cmd;
u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ &read_cmd.dma);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure while reading dump\n");
+ "Memory allocation failure while reading dump\n");
return -ENOMEM;
}
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
- total_read_len, file_name, &read_len,
- &eof, &addn_status);
+ total_read_len, file_name,
+ &read_len, &eof, &addn_status);
if (!status) {
memcpy(buf + total_read_len, read_cmd.va, read_len);
total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
}
}
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ read_cmd.dma);
return status;
}
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
- LANCER_FW_DUMP_FILE);
+ LANCER_FW_DUMP_FILE);
else
be_cmd_get_reg_len(adapter, &log_size);
}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
memset(buf, 0, regs->len);
if (lancer_chip(adapter))
lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
- regs->len, buf);
+ regs->len, buf);
else
be_cmd_get_regs(adapter, regs->len, buf);
}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
return 0;
}
-static void
-be_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
}
}
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
adapter->rx_fc = ecmd->rx_pause;
status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
+ adapter->tx_fc, adapter->rx_fc);
if (status)
dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
return status;
}
-static int
-be_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
return status;
}
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
- 4096, &ddrdma_cmd);
+ 4096, &ddrdma_cmd);
if (ret != 0)
goto err;
}
@@ -773,20 +766,17 @@ err:
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
- u64 *status)
+ u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- loopback_type, 1);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
- loopback_type, 1500,
- 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- BE_NO_LOOPBACK, 1);
+ loopback_type, 1500, 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
return *status;
}
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0)
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
}
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
return be_load_fw(adapter, efl->data);
}
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_PF_FILE);
+ LANCER_VPD_PF_FILE);
else
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_VF_FILE);
+ LANCER_VPD_VF_FILE);
} else {
return BE_READ_SEEPROM_LEN;
}
}
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
- uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
else
return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
}
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
switch (flow_type) {
case TCP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case TCP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct be_rx_obj *rxo;
int status = 0, i, j;
u8 rsstable[128];
- u32 rss_flags = adapter->rss_flags;
+ u32 rss_flags = adapter->rss_info.rss_flags;
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return -EINVAL;
}
- if (rss_flags == adapter->rss_flags)
+ if (rss_flags == adapter->rss_info.rss_flags)
return status;
if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}
}
}
- status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+ status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+ rss_flags, 128, adapter->rss_info.rss_hkey);
if (!status)
- adapter->rss_flags = rss_flags;
+ adapter->rss_info.rss_flags = rss_flags;
return status;
}
@@ -1103,6 +1090,68 @@ static int be_set_channels(struct net_device *netdev,
return be_update_queues(adapter);
}
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct rss_info *rss = &adapter->rss_info;
+
+ if (indir) {
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+ indir[i] = rss->rss_queue[i];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+ return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ int rc = 0, i, j;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+ if (indir) {
+ struct be_rx_obj *rxo;
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+ j = indir[i];
+ rxo = &adapter->rx_obj[j];
+ rsstable[i] = rxo->rss_id;
+ adapter->rss_info.rss_queue[i] = j;
+ }
+ } else {
+ memcpy(rsstable, adapter->rss_info.rsstable,
+ RSS_INDIR_TABLE_LEN);
+ }
+
+ if (!hkey)
+ hkey = adapter->rss_info.rss_hkey;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
+ if (rc) {
+ adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+ return -EIO;
+ }
+ memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+ memcpy(adapter->rss_info.rsstable, rsstable,
+ RSS_INDIR_TABLE_LEN);
+ return 0;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1178,10 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_rxfh_indir_size = be_get_rxfh_indir_size,
+ .get_rxfh_key_size = be_get_rxfh_key_size,
+ .get_rxfh = be_get_rxfh,
+ .set_rxfh = be_set_rxfh,
.get_channels = be_get_channels,
.set_channels = be_set_channels
};
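The new get_rxfh/set_rxfh hooks (with their _indir_size/_key_size helpers) let the ethtool core read and rewrite the RSS indirection table and hash key, which userspace reaches through ethtool's -x/-X commands. Conceptually, the NIC hashes each received flow and the low bits of that hash index the redirection table, whose entry names the RX queue; a minimal generic sketch of that lookup (illustration only, not code from this driver):

	/* hash: value the NIC computes over the flow tuple;
	 * indir: redirection table of queue numbers, indir_len entries long */
	static u32 pick_rx_queue(u32 hash, const u32 *indir, u32 indir_len)
	{
		return indir[hash % indir_len];
	}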
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..3f04356afa82 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- u16 len, u16 entry_size)
+ u16 len, u16 entry_size)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
u32 reg, enabled;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
- &reg);
+ &reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
return;
pci_write_config_dword(adapter->pdev,
- PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
- val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- DB_EQ_RING_ID_EXT_MASK_SHIFT);
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_error)
return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
- if (be_roce_supported(adapter)) {
+ if (be_roce_supported(adapter)) {
drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
{
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_pport_stats *pport_stats =
- pport_stats_from_cmd(adapter);
+ struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
}
static void populate_erx_stats(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u32 erx_stat)
+ struct be_rx_obj *rxo, u32 erx_stat)
{
if (!BEx_chip(adapter))
rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
}
static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs,
+ bool stopped)
{
struct be_tx_stats *stats = tx_stats(txo);
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
u8 vlan_prio;
u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len,
+ bool skip_hw_vlan)
{
u16 vlan_tag, proto;
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- bool unmap_single)
+ bool unmap_single)
{
dma_addr_t dma;
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
- struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
- *skip_hw_vlan = true;
+ *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN))) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
static int be_vid_config(struct be_adapter *adapter)
{
u16 vids[BE_NUM_VLANS_SUPPORTED];
- u16 num = 0, i;
+ u16 num = 0, i = 0;
int status = 0;
/* No need to further configure vids if in promiscuous mode */
@@ -1109,13 +1105,10 @@ static int be_vid_config(struct be_adapter *adapter)
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
- for (i = 0; i < VLAN_N_VID; i++)
- if (adapter->vlan_tag[i])
- vids[num++] = cpu_to_le16(i);
-
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vids, num, 0);
+ for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ vids[num++] = cpu_to_le16(i);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
if (status) {
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
@@ -1160,16 +1153,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
return status;
- if (adapter->vlan_tag[vid])
+ if (test_bit(vid, adapter->vids))
return status;
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
if (status) {
adapter->vlans_added--;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
}
return status;
@@ -1184,12 +1177,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
goto ret;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
ret:
return status;
}
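be_main.c also switches VLAN bookkeeping from a per-VID flag array (adapter->vlan_tag[]) to a VLAN-ID bitmap (adapter->vids) driven with the standard bitmap helpers, which lets be_vid_config() walk only the configured IDs. The idiom, sketched with generic names (the field declaration itself lives in be.h, outside this excerpt):

	/* one bit per possible VLAN ID; replaces the old per-VID flag array */
	DECLARE_BITMAP(vids, VLAN_N_VID);
	u16 table[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, vid = 0;

	set_bit(5, vids);			/* VLAN 5 added */
	if (test_bit(5, vids))			/* membership test */
		clear_bit(5, vids);		/* VLAN 5 removed again */

	for_each_set_bit(vid, vids, VLAN_N_VID)	/* walk only configured VIDs */
		table[num++] = cpu_to_le16(vid);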
@@ -1254,8 +1247,10 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Set to MCAST promisc mode if setting MULTICAST address fails */
if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
+ dev_info(&adapter->pdev->dev,
+ "Exhausted multicast HW filters.\n");
+ dev_info(&adapter->pdev->dev,
+ "Disabling HW multicast filtering.\n");
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
}
done:
@@ -1287,7 +1282,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
- mac, vf);
+ mac, vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -1295,7 +1290,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
}
static int be_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *vi)
+ struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1316,8 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,8 +1342,7 @@ static int be_set_vf_vlan(struct net_device *netdev,
return status;
}
-static int be_set_vf_tx_rate(struct net_device *netdev,
- int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
@@ -1369,7 +1362,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
- "tx rate %d on VF %d failed\n", rate, vf);
+ "tx rate %d on VF %d failed\n", rate, vf);
else
adapter->vf_cfg[vf].tx_rate = rate;
return status;
@@ -1469,7 +1462,7 @@ modify_eqd:
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_stats *stats = rx_stats(rxo);
@@ -1566,7 +1559,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+ curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1725,8 +1719,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
rxcp->tunneled =
@@ -1757,8 +1751,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1793,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlan_tag = swab16(rxcp->vlan_tag);
if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
- !adapter->vlan_tag[rxcp->vlan_tag])
+ !test_bit(rxcp->vlan_tag, adapter->vids))
rxcp->vlanf = 0;
}
@@ -1915,7 +1909,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 last_index)
+ struct be_tx_obj *txo, u16 last_index)
{
struct be_queue_info *txq = &txo->q;
struct be_eth_wrb *wrb;
@@ -2122,7 +2116,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
+ sizeof(struct be_eq_entry));
if (rc)
return rc;
@@ -2155,7 +2149,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- sizeof(struct be_mcc_compl)))
+ sizeof(struct be_mcc_compl)))
goto err;
/* Use the default EQ for MCC completions */
@@ -2275,7 +2269,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
rxo->adapter = adapter;
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
+ sizeof(struct be_eth_rx_compl));
if (rc)
return rc;
@@ -2339,7 +2333,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2359,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
* promiscuous mode on some skews
*/
if (unlikely(rxcp->port != adapter->port_num &&
- !lancer_chip(adapter))) {
+ !lancer_chip(adapter))) {
be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -2405,8 +2399,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
if (!txcp)
break;
num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp));
+ AMAP_GET_BITS(struct
+ amap_eth_tx_compl,
+ wrb_index, txcp));
}
if (work_done) {
@@ -2416,7 +2411,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
/* As Tx wrbs have been freed up, wake up netdev queue
* if it was stopped due to lack of tx wrbs. */
if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, idx);
}
@@ -2510,9 +2505,9 @@ void be_detect_error(struct be_adapter *adapter)
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
+ SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ SLIPORT_ERROR2_OFFSET);
adapter->hw_error = true;
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2526,13 @@ void be_detect_error(struct be_adapter *adapter)
}
} else {
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
+ PCICFG_UE_STATUS_LOW, &ue_lo);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
+ PCICFG_UE_STATUS_HIGH, &ue_hi);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2619,7 @@ fail:
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eqo)
+ struct be_eq_obj *eqo)
{
return adapter->msix_entries[eqo->msix_idx].vector;
}
@@ -2648,7 +2643,7 @@ err_msix:
for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
- status);
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2774,7 +2769,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rsstable[128];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+ struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2795,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN;
+ j += adapter->num_rx_qs - 1) {
for_all_rss_queues(adapter, rxo, i) {
- if ((j + i) >= 128)
+ if ((j + i) >= RSS_INDIR_TABLE_LEN)
break;
- rsstable[j + i] = rxo->rss_id;
+ rss->rsstable[j + i] = rxo->rss_id;
+ rss->rss_queue[j + i] = i;
}
}
- adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
if (!BEx_chip(adapter))
- adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
- RSS_ENABLE_UDP_IPV6;
+ rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
} else {
/* Disable RSS, if only default RX Q is created */
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
}
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
+ get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss_hkey);
if (rc) {
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
+ memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
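With the new rss_info structure, be_rx_qs_create() remembers both the logical queue mapping (rss_queue[]) and the hardware ring IDs (rsstable[]) it programs, and it now seeds the hash key from get_random_bytes() and saves it so the ethtool hooks can report it later. Stripped of the driver specifics, the table fill above amounts to spreading entries evenly over the RSS-capable queues; a compact sketch with generic names (not the driver's exact loop):

	/* spread RSS_INDIR_TABLE_LEN entries round-robin over num_rss_qs queues */
	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
		q = i % num_rss_qs;
		rss->rss_queue[i] = q;			/* logical queue index */
		rss->rsstable[i] = rx_obj[q].rss_id;	/* HW ring id for that queue */
	}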
@@ -2896,7 +2897,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (enable) {
status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ PCICFG_PM_CONTROL_OFFSET,
+ PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
@@ -2905,7 +2907,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr, &cmd);
+ adapter->netdev->dev_addr,
+ &cmd);
pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
@@ -2944,7 +2947,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address assignment failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n",
+ vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3086,9 +3090,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+ status =
+ be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -3594,8 +3600,8 @@ static void be_netpoll(struct net_device *netdev)
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p, u32 img_start, int image_size,
- int hdr_size)
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
{
u32 crc_offset;
u8 flashed_crc[4];
@@ -3605,11 +3611,10 @@ static bool be_flash_redboot(struct be_adapter *adapter,
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc,
- (image_size - 4));
+ status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
if (status) {
dev_err(&adapter->pdev->dev,
- "could not get crc from flash, not flashing redboot\n");
+ "could not get crc from flash, not flashing redboot\n");
return false;
}
@@ -3649,8 +3654,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- int header_size,
- const struct firmware *fw)
+ int header_size,
+ const struct firmware *fw)
{
struct flash_section_info *fsec = NULL;
const u8 *p = fw->data;
@@ -3666,7 +3671,7 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
}
static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size)
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
{
u32 total_bytes = 0, flash_op, num_bytes = 0;
int status = 0;
@@ -3693,7 +3698,7 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
memcpy(req->data_buf, img, num_bytes);
img += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, num_bytes);
+ flash_op, num_bytes);
if (status) {
if (status == ILLEGAL_IOCTL_REQ &&
optype == OPTYPE_PHY_FW)
@@ -3708,10 +3713,8 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd,
- int num_of_images)
-
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
@@ -3793,8 +3796,10 @@ static int be_flash_BEx(struct be_adapter *adapter,
if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
redboot = be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size + img_hdrs_size);
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size);
if (!redboot)
continue;
}
@@ -3805,7 +3810,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size);
+ pflashcomp[i].size);
if (status) {
dev_err(&adapter->pdev->dev,
"Flashing section type %d failed.\n",
@@ -3817,8 +3822,8 @@ static int be_flash_BEx(struct be_adapter *adapter,
}
static int be_flash_skyhawk(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
int status = 0, i, filehdr_size = 0;
int img_offset, img_size, img_optype, redboot;
@@ -3866,8 +3871,9 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
if (img_optype == OPTYPE_REDBOOT) {
redboot = be_flash_redboot(adapter, fw->data,
- img_offset, img_size,
- filehdr_size + img_hdrs_size);
+ img_offset, img_size,
+ filehdr_size +
+ img_hdrs_size);
if (!redboot)
continue;
}
@@ -3889,7 +3895,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
}
static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
+ const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +3961,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev,
"Firmware load error. "
@@ -3976,9 +3982,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
} else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_err(&adapter->pdev->dev,
- "System reboot required for new FW"
- " to be active\n");
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW to be active\n");
}
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4047,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
switch (ufi_type) {
case UFI_TYPE4:
status = be_flash_skyhawk(adapter, fw,
- &flash_cmd, num_imgs);
+ &flash_cmd, num_imgs);
break;
case UFI_TYPE3R:
status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4117,7 @@ fw_exit:
return status;
}
-static int be_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -4155,8 +4159,7 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask)
+ struct net_device *dev, u32 filter_mask)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4870,7 +4873,7 @@ static void be_shutdown(struct pci_dev *pdev)
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phy, ifr, cmd);
}
-static int ethoc_config(struct net_device *dev, struct ifmap *map)
-{
- return -ENOSYS;
-}
-
static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
.ndo_do_ioctl = ethoc_ioctl,
- .ndo_set_config = ethoc_config,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..e27e60910949 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
u32 tx_hwtstamp_timeouts;
/* Rx */
- bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
- int work_to_do) ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
- gfp_t gfp);
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..4e5ad7ebe1f2 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
+
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
reg + (offset << 2), val,
(test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 val;
+
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..5f5539561661 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1334,6 +1334,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
+
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
u16 emi_addr;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..e4207efd13f8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -599,6 +599,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -615,6 +616,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
+
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -1198,6 +1200,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+
rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1756,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
@@ -1960,6 +1964,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
+
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
@@ -2204,6 +2209,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
int i;
+
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -2921,6 +2927,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
+
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
/* set up some performance related parameters to encourage the
@@ -3239,6 +3246,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
+
ew32(RXDCTL(0), rxdctl | 0x3);
}
@@ -4695,6 +4703,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
+
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
@@ -4808,6 +4817,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
(adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl | E1000_RCTL_EN);
adapter->flags &= ~FLAG_RESTART_NOW;
}
@@ -4930,6 +4940,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
!txb2b) {
u32 tarc0;
+
tarc0 = er32(TARC(0));
tarc0 &= ~SPEED_MODE_BIT;
ew32(TARC(0), tarc0);
@@ -5170,7 +5181,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
__be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ return false;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5226,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return true;
}
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6220,7 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
+
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7039,7 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
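DEFINE_PCI_DEVICE_TABLE() is deprecated; declaring the table as a plain const struct pci_device_id array is the preferred spelling and yields the same module alias information when paired with MODULE_DEVICE_TABLE(). A minimal sketch of the idiom (hypothetical driver name and device ID, not taken from this patch):

	static const struct pci_device_id example_pci_tbl[] = {
		{ PCI_VDEVICE(INTEL, 0x10d3) },	/* hypothetical device ID */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);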
@@ -7144,6 +7156,7 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
+
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
+
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
+
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
+
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
(hw->phy.addr == 2) &&
!(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
u16 data2 = 0x7EFF;
+
ret_val = e1000_access_phy_debug_regs_hv(hw,
(1 << 6) | 0x3,
&data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..a46571c1e9f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -329,9 +329,7 @@ struct i40e_pf {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
- struct work_struct ptp_tx_work;
struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..34415d342ece 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -585,6 +595,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +719,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -835,6 +852,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -929,6 +949,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..f2ba4b76ecd3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -182,9 +182,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +204,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -1289,27 +1287,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1499,6 +1476,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1583,11 +1569,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..22eefda3a530 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -975,6 +975,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -1300,6 +1307,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
i40e_status status;
+ u16 len;
if (dv == NULL)
return I40E_ERR_PARAM;
@@ -1311,7 +1319,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
cmd->driver_subbuild_ver = dv->subbuild_version;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
return status;
}
@@ -2094,8 +2109,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2253,6 +2268,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
* @seid: VSI seid
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..1aaec400b28e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1744,10 +1744,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
- } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1967,8 +1963,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd-atr off\n");
dev_info(&pf->pdev->dev, " fd-atr on\n");
- dev_info(&pf->pdev->dev, " fd-sb off\n");
- dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..0cf47c958081 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -649,7 +649,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
struct i40e_ring *rx_ring;
@@ -662,14 +662,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ i += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i + 2] = rx_ring->stats.packets;
- data[i + 3] = rx_ring->stats.bytes;
+ data[i] = rx_ring->stats.packets;
+ data[i + 1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ i += 2;
}
rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
@@ -1189,6 +1191,12 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
return -EINVAL;
fsp->flow_type = rule->flow_type;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..5c341aeb5d53 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -747,6 +747,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
{ 0 }
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cf0761f08911..e399f9b70777 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_BUILD 46
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2312,6 +2312,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
+ /* set the prefena field to 1 because the manual says to */
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -3160,9 +3162,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
/* Skip if the queue is already in the requested state */
- if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
- if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
/* turn on/off the queue */
@@ -3178,13 +3178,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (enable) {
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3223,15 +3218,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
- if (enable) {
- /* is STAT set ? */
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- } else {
- /* is !STAT set ? */
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
if (enable)
@@ -3244,13 +3233,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
for (j = 0; j < 10; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) {
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3513,6 +3497,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+}
+
+/**
* i40e_quiesce_vsi - Pause a given VSI
* @vsi: the VSI being paused
**/
@@ -3525,8 +3522,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
- set_bit(__I40E_DOWN, &vsi->state);
- i40e_down(vsi);
+ i40e_vsi_close(vsi);
}
}
@@ -3543,7 +3539,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
- i40e_up(vsi); /* this clears the DOWN bit */
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
/**
@@ -4028,6 +4024,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
pf->vsi[v]->seid);
/* Will try to configure as many components */
} else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
@@ -4067,6 +4065,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_ENABLED;
}
+ } else {
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ pf->hw.aq.asq_last_status);
}
out:
@@ -4309,24 +4310,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
if (err)
goto err_setup_rx;
- if (!vsi->netdev) {
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ } else {
err = EINVAL;
goto err_setup_rx;
}
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
-
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
err = i40e_up_complete(vsi);
if (err)
@@ -4383,14 +4392,7 @@ static int i40e_close(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state))
- return 0;
-
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
return 0;
}
@@ -5226,9 +5228,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- /* increment MSI-X count because current FW skips one */
- pf->hw.func_caps.num_msix_vectors++;
-
if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
(pf->hw.aq.fw_maj_ver < 2)) {
pf->hw.func_caps.num_msix_vectors++;
@@ -5267,8 +5266,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- bool new_vsi = false;
- int err, i;
+ int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
@@ -5288,47 +5286,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- goto err_vsi;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ return;
}
- new_vsi = true;
}
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
- err = i40e_vsi_setup_tx_resources(vsi);
- if (err)
- goto err_setup_tx;
- err = i40e_vsi_setup_rx_resources(vsi);
- if (err)
- goto err_setup_rx;
-
- if (new_vsi) {
- char int_name[IFNAMSIZ + 9];
- err = i40e_vsi_configure(vsi);
- if (err)
- goto err_setup_rx;
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
- err = i40e_up_complete(vsi);
- if (err)
- goto err_up_complete;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
- }
-
- return;
-
-err_up_complete:
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-err_setup_rx:
- i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
- i40e_vsi_free_tx_resources(vsi);
-err_vsi:
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- i40e_vsi_clear(vsi);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
@@ -5642,7 +5605,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
- const int vxlan_hdr_qwords = 4;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u8 filter_index;
@@ -5660,7 +5622,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
port = pf->vxlan_ports[i];
ret = port ?
i40e_aq_add_udp_tunnel(hw, ntohs(port),
- vxlan_hdr_qwords,
I40E_AQC_TUNNEL_TYPE_VXLAN,
&filter_index, NULL)
: i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -6649,6 +6610,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
}
#endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses so if a
+ * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -6676,6 +6727,13 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = i40e_ndo_fdb_del,
+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
+#endif
+#endif
};
/**
@@ -6720,10 +6778,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
- NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ netdev->features |= NETIF_F_NTUPLE;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
@@ -6982,11 +7042,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
unregister_netdev(vsi->netdev);
}
} else {
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
}
i40e_vsi_disable_irq(vsi);
}
@@ -8090,6 +8146,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 link_status;
int err = 0;
u32 len;
+ u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -8243,7 +8300,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- goto err_init_dcb;
+ /* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -8279,6 +8336,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -8332,6 +8396,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dv.minor_version = DRV_VERSION_MINOR;
dv.build_version = DRV_VERSION_BUILD;
dv.subbuild_version = 0;
+ strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
/* since everything's happy, start the service_task timer */
@@ -8373,9 +8438,6 @@ err_vsis:
err_switch_setup:
i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..d351832bf235 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -157,8 +157,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +167,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..1fedc7a1589d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -217,40 +217,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
- struct i40e_pf *pf = container_of(work, struct i40e_pf,
- ptp_tx_work);
- struct i40e_hw *hw = &pf->hw;
- u32 prttsyn_stat_0;
-
- if (!pf->ptp_tx_skb)
- return;
-
- if (time_is_before_jiffies(pf->ptp_tx_start +
- I40E_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
- pf->ptp_tx_skb = NULL;
- pf->tx_hwtstamp_timeouts++;
- dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
- return;
- }
-
- prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
- i40e_ptp_tx_hwtstamp(pf);
- else
- schedule_work(&pf->ptp_tx_work);
-}
-
-/**
* i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
* @ptp: The PTP clock structure
* @rq: The requested feature to change
@@ -608,7 +574,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
u32 regval;
spin_lock_init(&pf->tmreg_lock);
- INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
netdev->name);
@@ -647,7 +612,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_tx = false;
pf->ptp_rx = false;
- cancel_work_sync(&pf->ptp_tx_work);
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..ece7ae99b03a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1825,9 +1825,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT;
- pf->ptp_tx_start = jiffies;
- schedule_work(&pf->ptp_tx_work);
-
return 1;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..c4df8bac2db1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -167,6 +167,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -409,6 +412,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..82e7abf64308 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
/***********************misc routines*****************************/
/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+}
+
+/**
* i40e_vc_isvalid_vsi_id
* @vf: pointer to the vf info
* @vsi_id: vf relative vsi id
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ vf->tx_rate / 50, 0, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
+ }
+
error_alloc_vsi_res:
return ret;
}
@@ -2022,10 +2049,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ true, false);
/* add the new mac address */
- f = i40e_add_filter(vsi, mac, 0, true, false);
+ f = i40e_add_filter(vsi, mac, vf->port_vlan_id, true, false);
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF ucast filter\n");
@@ -2088,18 +2116,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
+ /* Administrator Error - knock the VF offline until he does
+ * the right thing by reconfiguring his network correctly
+ * and then reloading the VF driver.
+ */
+ i40e_vc_disable_vf(pf, vf);
+ }
/* Check for condition where there was already a port VLAN ID
* filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
* Before deleting all the old VLAN filters we must add new ones
* with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
* MAC addresses deleted.
*/
- if (!(vlan_id || qos) && vsi->info.pvid)
+ if ((!(vlan_id || qos) ||
+ (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
if (vsi->info.pvid) {
@@ -2160,7 +2198,61 @@ error_pvid:
**/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int speed = 0;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ default:
+ break;
+ }
+
+ if (tx_rate > speed) {
+ dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for vf %d.",
+ tx_rate, vf->vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, tx_rate / 50, 0,
+ NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n",
+ ret);
+ ret = -EIO;
+ goto error;
+ }
+ vf->tx_rate = tx_rate;
+error:
+ return ret;
}
/**
@@ -2200,10 +2292,17 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->tx_rate = vf->tx_rate;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
I40E_VLAN_PRIORITY_SHIFT;
+ if (vf->link_forced == false)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up == true)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
ret = 0;
error_param:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..ba3d1f8414be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,6 +98,7 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..c79df257f830 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -28,6 +28,16 @@
#include "i40e_prototype.h"
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -659,6 +669,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -786,6 +802,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -880,6 +899,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..7d24be528601 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -87,6 +87,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..6e617669c326 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -180,9 +180,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +202,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -1289,27 +1287,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1499,6 +1476,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1583,11 +1569,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..775fcb2463d7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -53,6 +53,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..51a6dee3c7b1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -173,6 +173,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -415,6 +418,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..a46be016039e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -365,6 +365,316 @@ static int i40evf_set_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* We always hash on IP src and dest addresses */
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+
+ case TCP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->vsi_res->num_queue_pairs;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40evf_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh_indir - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
+ return 0;
+}
+
+/**
+ * i40evf_set_rxfh_indir - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = indir[j++];
+ hlut_val |= indir[j++] << 8;
+ hlut_val |= indir[j++] << 16;
+ hlut_val |= indir[j++] << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
+ }
+
+ return 0;
+}
+
static struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
@@ -378,6 +688,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_rxnfc = i40evf_get_rxnfc,
+ .set_rxnfc = i40evf_set_rxnfc,
+ .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+ .get_rxfh_indir = i40evf_get_rxfh_indir,
+ .set_rxfh_indir = i40evf_set_rxfh_indir,
+ .get_channels = i40evf_get_channels,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..6edd581dffa7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -25,13 +25,15 @@
#include "i40e_prototype.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.16"
+#define DRV_VERSION "0.9.23"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -1309,7 +1311,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- dev_info(&adapter->pdev->dev, "Checking for redemption\n");
if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1534,9 +1535,13 @@ static void i40evf_reset_task(struct work_struct *work)
rstat_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev))
- i40evf_close(adapter->netdev);
-
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..2e36c670d8df 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_82575
* e1000_82576
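
Most of the igb hunks that follow are the same mechanical change as the one above: the boxed license banner at the top of each file is reflowed into the plain networking-style block comment (opening /* on the first text line, a column of " * ", a closing " */"), with the legal text itself left unchanged. The identical hunk therefore repeats for every .c and .h file in the driver.
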
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -526,7 +522,7 @@ out:
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
u32 link_mode = 0;
@@ -1180,8 +1176,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
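
The loop above spins until the hardware semaphore is acquired, so its body is intentionally empty. Putting the semicolon on its own line next to the /* Empty */ comment is the usual kernel style for that, since a semicolon hidden at the end of the while condition is easy to misread as an accidentally terminated statement. A stand-alone illustration of the two forms (plain C, nothing driver-specific):

#include <stdio.h>

int main(void)
{
        int countdown = 3;

        /* discouraged: while (countdown-- > 0);  -- the ';' is easy to miss */

        /* preferred: the empty body is explicit */
        while (countdown-- > 0)
                ;       /* Empty */

        printf("countdown finished at %d\n", countdown);
        return 0;
}
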
@@ -1216,7 +1212,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
while (timeout) {
if (rd32(E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout)
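
The msleep(1) to usleep_range(1000, 2000) conversions that recur through these hunks follow the kernel's timers-howto guidance: msleep() is jiffy based, so requests in the 1-20 ms range can oversleep by a whole tick or more, while usleep_range() uses hrtimers and its min/max window lets the scheduler coalesce nearby wakeups. The polling loop above, restated with explanatory comments (same code as the hunk, comments added for clarity):

        while (timeout) {
                if (rd32(E1000_EEMNGCTL) & mask)        /* config-done bit set? */
                        break;
                /* was msleep(1): could sleep far longer than 1 ms; the
                 * 1000-2000 us range keeps the wait short but still lets
                 * the scheduler batch this wakeup with others. */
                usleep_range(1000, 2000);
                timeout--;
        }
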
@@ -1269,7 +1265,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
+ &duplex);
/* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
@@ -1316,7 +1312,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
@@ -1411,7 +1407,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1436,9 +1432,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* set the completion timeout for interface */
ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
+ if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
hw_dbg("Masking off all interrupts\n");
wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1442,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = rd32(E1000_CTRL);
@@ -1676,7 +1671,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
return ret_val;
}
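
One conversion in this hunk is worth flagging rather than just recording: printk(KERN_DEBUG ...) becomes hw_dbg(KERN_DEBUG ...). hw_dbg() is the driver's own debug wrapper and supplies its own log level, so a KERN_DEBUG prefix passed through it ends up as literal text inside the format string. Assuming hw_dbg() here behaves like the other Intel drivers' wrappers, the cleaner form would drop the prefix (and probably the doubled newline):

                        hw_dbg("NVM Read Error\n");     /* suggested form, not what the hunk applies */
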
@@ -1689,7 +1684,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1920,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1948,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
wr32(E1000_RCTL, temp_rctl);
wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
wrfl();
- msleep(2);
+ usleep_range(2000, 3000);
/* Enable RX queues that were previously enabled and restore our
* previous state
@@ -2005,14 +2000,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
* 16ms to 55ms
*/
ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2236,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
@@ -2259,7 +2254,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Add delay to insure DEV_RST has time to complete */
if (global_device_reset)
- msleep(5);
+ usleep_range(5000, 6000);
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
@@ -2436,8 +2431,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
@@ -2447,8 +2441,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
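
The two checksum hunks above join user-visible strings that had been split across source lines. checkpatch flags such splits because the full message can no longer be found with a simple grep of the source, and it explicitly exempts long string literals from the 80-column limit. Before and after, as in the hunks:

        /* before: grep for "checksum compatibility" finds nothing */
        hw_dbg("NVM Read Error while updating checksum"
               " compatibility bit.\n");

        /* after: one literal, even though the line runs past 80 columns */
        hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
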
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 hdr_info; /* Split Head, buf len */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..f85be6695e44 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,33 +304,25 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
@@ -406,12 +395,12 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1011,8 +1000,7 @@
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
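
Worth double-checking in the hunk above: the reflowed header for e1000_hw.h drops the "version 2, as published by the Free Software Foundation." line that every other file in this series keeps (the hunk length, +1,24 rather than +1,25, matches the shorter text). Whether that is intentional or a line lost in the reflow is not clear from the diff itself.
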
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
#include "e1000_mbx.h"
struct e1000_mac_operations {
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
#ifdef CONFIG_IGB_HWMON
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
#define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..2231598fb42d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_i210
* e1000_i211
@@ -435,6 +432,7 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
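
The single added break; above is a real fix rather than a style change: without it, the preceding block (the one that falls back to *data = ID_LED_RESERVED_FFFF, presumably the ID-LED settings case) fell straight through into the NVM_SUB_DEV_ID case and overwrote *data with the subsystem device ID. A stand-alone illustration of the pitfall (plain C, hypothetical values):

#include <stdio.h>

int main(void)
{
        unsigned int offset = 1, data = 0;

        switch (offset) {
        case 1:
                data = 0xFFFF;  /* intended result for offset 1 */
                break;          /* remove this line and case 2 clobbers data */
        case 2:
                data = 0x1234;
                break;
        default:
                break;
        }
        printf("data = 0x%X\n", data);  /* prints 0xFFFF with the break in place */
        return 0;
}
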
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
+ switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..92bcdbe756b2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..424f16c43759 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = igb_wait_autoneg(hw);
if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
+ hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
@@ -2244,7 +2240,7 @@ void igb_power_down_phy_copper(struct e1000_hw *hw)
hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
}
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..fe921e29dda8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..833bbb948d97 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
@@ -301,9 +298,9 @@
#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
+ (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +355,7 @@
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..333a2b0bbada 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for igb */
@@ -286,7 +283,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -399,7 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +883,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1057,8 @@ static struct igb_reg_test reg_test_i350[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1100,8 @@ static struct igb_reg_test reg_test_82580[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1129,10 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1148,14 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
};
@@ -1170,7 +1169,8 @@ static struct igb_reg_test reg_test_82575[] = {
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82575, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1196,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
@@ -1206,11 +1206,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1218,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
+
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1388,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
@@ -1412,7 +1413,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1459,7 +1460,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1481,7 +1482,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1503,7 +1504,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1515,7 +1516,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1949,6 +1950,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2413,9 +2415,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2425,9 +2429,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
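The /* Fall through */ comments added to igb_get_rss_hash_opts() here, and later to igb_cache_ring_register() and igb_vmm_control() in igb_main.c, change nothing at run time; they mark case labels that deliberately cascade into the next one, so a reader or a static checker can tell an intentional fall-through from a forgotten break. A self-contained sketch of the idiom, with stand-in constants rather than the real ethtool flow types:

    enum demo_flow { DEMO_TCP, DEMO_UDP, DEMO_OTHER };

    static unsigned int demo_hash_fields(enum demo_flow flow, int udp_rss_enabled)
    {
            unsigned int fields = 0x3;              /* always hash src/dst IP */

            switch (flow) {
            case DEMO_TCP:
                    fields |= 0xc;                  /* TCP always adds L4 ports */
                    /* Fall through */
            case DEMO_UDP:
                    if (udp_rss_enabled)
                            fields |= 0xc;          /* UDP ports only if enabled */
                    /* Fall through */
            default:
                    break;                          /* others: IP addresses only */
            }

            return fields;
    }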
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..bfcda8a455f4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
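DEFINE_PCI_DEVICE_TABLE() is deprecated; the macro only hides a plain "const struct pci_device_id" array, so checkpatch now asks for the open-coded form used above. A hedged sketch with a placeholder device ID:

    #include <linux/module.h>
    #include <linux/pci.h>

    /* 0x10c9 is a placeholder device ID for illustration only. */
    static const struct pci_device_id demo_pci_tbl[] = {
            { PCI_VDEVICE(INTEL, 0x10c9) },
            { }     /* all-zero entry terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, demo_pci_tbl);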
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
+ pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
netdev->state, netdev->trans_start, netdev->last_rx);
}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ return !!(i2cctl & E1000_I2C_CLK_IN);
}
static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
-
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
+ /* Fall through */
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
+ /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
+ /* On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
*/
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
+
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wrfl();
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
+
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
+
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
igb_irq_disable(adapter);
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -2529,7 +2526,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
@@ -3077,6 +3075,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -3248,7 +3247,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
* Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -3389,7 +3388,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
- adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3430,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
u32 vtctl = rd32(E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3512,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -4058,7 +4059,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = rd32(E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -4077,7 +4079,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
if (adapter->wvbr & (1 << j) ||
adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4211,15 @@ static void igb_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
+
hw->mac.ops.get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4245,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ E1000_THSTAT_LINK_THROTTLE))
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -4277,12 +4277,11 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ netdev_err(netdev, "The network adapter was stopped because it overheated\n");
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
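Both link status messages move from raw printk(KERN_INFO ...) to netdev_info(), which logs through the underlying struct device and prefixes the driver and interface name automatically; the format strings keep the "igb: %s" prefix because, per the in-code comment, the link status message must follow a fixed format, so the name appears both in the netdev_info() prefix and in the message body. A minimal sketch of the helper, assuming only <linux/netdevice.h>:

    #include <linux/netdevice.h>

    static void demo_report_link(struct net_device *netdev, u32 speed_mbps,
                                 bool full_duplex)
    {
            /* netdev_info() already emits "<driver> <bus id> <ifname>: " */
            netdev_info(netdev, "NIC Link is Up %u Mbps %s Duplex\n",
                        speed_mbps, full_duplex ? "Full" : "Half");
    }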
@@ -4344,6 +4343,7 @@ static void igb_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
@@ -4483,13 +4483,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
itrval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
itrval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4674,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
return;
} else {
u8 l4_hdr = 0;
+
switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4962,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
*/
if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
unsigned short f;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
} else {
@@ -5140,7 +5141,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -5621,6 +5622,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5674,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
for (i = 0; i < adapter->vfs_allocated_count; i++) {
u32 vmolr = rd32(E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5770,6 +5773,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
@@ -5798,6 +5802,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
@@ -5902,8 +5907,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
-
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
@@ -6494,7 +6499,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
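Replacing the memcpy() of an igb_rx_buffer with plain structure assignment keeps the same member-wise copy but lets the compiler verify that both operands really have the same type and choose the copy itself; there is no size argument to get wrong. Illustrative only, with a stand-in struct:

    struct demo_rx_buffer {
            unsigned long   dma;
            void            *page;
            unsigned int    page_offset;
    };

    static void demo_reuse_buffer(struct demo_rx_buffer *new_buff,
                                  const struct demo_rx_buffer *old_buff)
    {
            *new_buff = *old_buff;  /* same effect as memcpy(new_buff, old_buff, sizeof(*new_buff)) */
    }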
@@ -6963,6 +6968,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7057,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return (total_packets < budget);
+ return total_packets < budget;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7178,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -7955,11 +7961,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
@@ -8049,6 +8057,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
+
wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
wr32(E1000_DMACR, 0);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..c688c8a4c063 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
struct vf_macvlans {
struct list_head l;
int vf;
- int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
+ struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
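The new ixgbe_mac_addr array gives the driver a software shadow of the receive address registers: each slot records the address, the pool/queue it serves, and a small state bitmask (DEFAULT for RAR[0], IN_USE for occupied slots, MODIFIED for slots that still have to be written back to hardware). ixgbe_add_mac_filter() and ixgbe_del_mac_filter(), added in ixgbe_main.c further down, only manipulate this table and then sync MODIFIED entries to the RARs, so the PF unicast list, VF MAC addresses and VF macvlans all allocate from one pool instead of carving out fixed RAR ranges. A self-contained sketch of the add side, with stand-in names and a bare array in place of the adapter structure:

    #include <string.h>

    #define DEMO_STATE_DEFAULT      0x1
    #define DEMO_STATE_MODIFIED     0x2
    #define DEMO_STATE_IN_USE       0x4

    struct demo_mac_entry {
            unsigned char   addr[6];
            unsigned short  queue;
            unsigned short  state;
    };

    /* Returns the slot index used (>= 0), or -1 when the table is full;
     * a separate sync pass would then push only MODIFIED slots to the
     * hardware RAR registers.
     */
    static int demo_add_filter(struct demo_mac_entry *tbl, int entries,
                               const unsigned char *addr, unsigned short queue)
    {
            int i;

            for (i = 0; i < entries; i++) {
                    if (tbl[i].state & DEMO_STATE_IN_USE)
                            continue;
                    tbl[i].state |= DEMO_STATE_MODIFIED | DEMO_STATE_IN_USE;
                    memcpy(tbl[i].addr, addr, sizeof(tbl[i].addr));
                    tbl[i].queue = queue;
                    return i;
            }
            return -1;
    }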
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..1c52e4753480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
int i;
bool link_up;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
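hw->fc.low_water grows from a single value into one entry per traffic class (see the ixgbe_fc_info change in ixgbe_type.h near the end of this series), so the XON/XOFF thresholds can differ per class, including the FCoE class with its jumbo frames. The validation above is tightened to match: with Tx flow control active, every class that has a high water mark must also have a non-zero low water mark strictly below it, since a zero or inverted low water mark makes the MAC flood XOFF frames. The same check is duplicated for ixgbe_fc_enable_generic() in ixgbe_common.c below. A sketch of the rule, with plain types in place of the hw structure:

    /* Returns 1 when the per-class water marks are usable, 0 otherwise. */
    static int demo_fc_watermarks_valid(const unsigned int *low,
                                        const unsigned int *high,
                                        int num_tc, int tx_pause_enabled)
    {
            int i;

            for (i = 0; i < num_tc; i++) {
                    if (!tx_pause_enabled || !high[i])
                            continue;       /* class not using flow control */
                    if (!low[i] || low[i] >= high[i])
                            return 0;       /* zero/inverted mark: XOFF flood */
            }
            return 1;
    }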
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..bdc55819179d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -271,6 +271,7 @@ out:
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
/* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw);
+ ret_val = ixgbe_setup_fc(hw);
+ if (!ret_val)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return 0;
+out:
+ return ret_val;
}
/**
@@ -2106,19 +2110,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
u32 fcrtl, fcrth;
int i;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/* Negotiate the fc mode to use */
ixgbe_fc_autoneg(hw);
@@ -2181,12 +2191,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..d15ff2e5edb7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
return unlikely(!addr);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
}
#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
- u32 value;
-
- if (ixgbe_removed(reg_addr))
- return IXGBE_FAILED_READ_REG;
- value = readl(reg_addr + reg);
- if (unlikely(value == IXGBE_FAILED_READ_REG))
- ixgbe_check_remove(hw, reg);
- return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
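ixgbe_read_reg() stops being a header inline and is moved out of line into ixgbe_main.c further down, which in turn lets ixgbe_check_remove() become static there. The logic itself is unchanged: an all-ones read is ambiguous on PCIe because that is also what a surprise-removed adapter returns, so the status register is re-read before the value is trusted, and once removal has been detected further MMIO is short-circuited. A minimal sketch of the pattern, assuming <linux/io.h> and stand-in register offsets:

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_FAILED_READ        0xFFFFFFFFU
    #define DEMO_STATUS_REG         0x00008         /* stand-in offset */

    static u32 demo_read_reg(u8 __iomem *base, u32 reg, bool *removed)
    {
            u32 value;

            if (*removed)
                    return DEMO_FAILED_READ;        /* no MMIO to a dead device */

            value = readl(base + reg);
            if (value == DEMO_FAILED_READ &&
                readl(base + DEMO_STATUS_REG) == DEMO_FAILED_READ)
                    *removed = true;                /* adapter was hot-removed */

            return value;
    }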
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
continue;
}
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
max_tc = prio_tc[i];
}
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
u8 up;
-#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d62e7a25cf97..8089ea9f2fba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
ixgbe_service_event_schedule(adapter);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u32 value;
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns : value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming any read that returns all ones by checking the
+ * status register value for all ones. This function avoids reading from
+ * the hardware if a removal was previously detected in which case it
+ * returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
u16 value;
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
* ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
* @adapter: driver data
*/
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ else
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+ ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+ return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+/* this function destroys the first RAR entry */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].queue = VMDQ_P(0);
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
- /* return error if we do not support writing to RAR table */
- if (!hw->mac.ops.set_rar)
- return -ENOMEM;
-
netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- VMDQ_P(0), IXGBE_RAH_AV);
+ ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--)
- hw->mac.ops.clear_rar(hw, rar_entries);
-
return count;
}
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
-
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
/* Only disable hardware filter vlans in promiscuous mode
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED)))
- ixgbe_vlan_filter_disable(adapter);
- else
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
}
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev);
+ count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
-
- if (adapter->num_vfs)
- ixgbe_restore_vf_multicasts(adapter);
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
*/
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
/* Calculate max LAN frame size */
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
if (!num_tc)
num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water > hw->fc.high_water[i])
- hw->fc.low_water = 0;
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
}
+
+ for (; i < MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
vmolr |= IXGBE_VMOLR_ROMPE;
hw->mac.ops.update_mc_addr_list(hw, dev);
}
- ixgbe_write_uc_addr_list(adapter->netdev);
+ ixgbe_write_uc_addr_list(adapter->netdev, pool);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 pool)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int entry;
-
- entry = hw->mac.num_rar_entries - pool;
- hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4742,7 +4877,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
int err;
+ u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr))
return;
@@ -4778,9 +4915,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ ixgbe_flush_sw_mac_table(adapter);
+ ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5164,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -7172,16 +7314,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
- return 0;
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+ return ret > 0 ? 0 : ret;
}
static int
@@ -8187,6 +8330,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -8319,6 +8464,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->mac_table);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8392,6 +8538,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..a01417c06620 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
return 0;
}
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (!entry->free)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
}
/* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
+ ixgbe_full_sync_mac_table(adapter);
}
+#endif
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
+ vmolr |= IXGBE_VMOLR_BAM;
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
u8 num_tcs = netdev_get_num_tc(adapter->netdev);
/* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- hw->mac.ops.clear_rar(hw, rar_entry);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
return 0;
}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ ixgbe_del_mac_filter(adapter,
+ entry->vf_macvlan, vf);
}
}
}
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, mac_addr, vf);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..cea640147604 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..551d6089a4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d0799e8e31e4..eacce3a2e9ec 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..24201033661b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* First, verify that the master reports correct status */
if (comm_pending(dev)) {
- mlx4_warn(dev, "Communication channel is not idle."
- "my toggle is %d (cmd:0x%x)\n",
+ mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd);
return -EAGAIN;
}
@@ -422,9 +421,8 @@ static int mlx4_slave_c