-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcmgenet.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/marvell-pp2.txt | 62
-rw-r--r--  Documentation/devicetree/bindings/net/stmmac.txt | 64
-rw-r--r--  Documentation/networking/mpls-sysctl.txt | 19
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  drivers/atm/ambassador.c | 5
-rw-r--r--  drivers/hv/ring_buffer.c | 94
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 5
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 3
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi.h | 56
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 3
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 11
-rw-r--r--  drivers/net/bonding/bond_main.c | 53
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 659
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 300
-rw-r--r--  drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 44
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 78
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 16
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/apm/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/apm/Makefile | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/Makefile | 6
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/enet.c | 71
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/enet.h | 43
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mac.c | 116
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mac.h | 110
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.c | 756
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.h | 75
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/ring.c | 81
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/ring.h | 119
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 34
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 51
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 215
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 62
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 178
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 76
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 168
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 1186
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 607
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 134
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 688
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 172
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 198
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 83
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1143
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 41
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 159
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1092
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 17
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 127
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_offload.c | 31
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 19
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 65
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 1566
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 186
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 825
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 56
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 184
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 48
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 47
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 220
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.h | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 30
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 45
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 85
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 222
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 14
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 14
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 45
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 55
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 14
-rw-r--r--  drivers/net/ethernet/silan/sc92031.c | 83
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 14
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 18
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 16
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 51
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 98
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 371
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 74
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 302
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 57
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 168
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 119
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 98
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 37
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 98
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 62
-rw-r--r--  drivers/net/ethernet/synopsys/Kconfig | 41
-rw-r--r--  drivers/net/ethernet/synopsys/Makefile | 9
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 736
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 648
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c | 3146
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 1334
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c | 80
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h | 746
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac.h | 651
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 51
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 14
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 14
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 62
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r--  drivers/net/fjes/fjes_ethtool.c | 19
-rw-r--r--  drivers/net/gtp.c | 531
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r--  drivers/net/hyperv/netvsc.c | 186
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 58
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 2
-rw-r--r--  drivers/net/ntb_netdev.c | 25
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 6
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 215
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 3
-rw-r--r--  drivers/net/tun.c | 24
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 14
-rw-r--r--  drivers/net/usb/catc.c | 31
-rw-r--r--  drivers/net/usb/r8152.c | 21
-rw-r--r--  drivers/net/usb/rtl8150.c | 35
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/scsi/qedf/Makefile | 2
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 190
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 93
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.c | 44
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.h | 85
-rw-r--r--  drivers/scsi/qedf/qedf.h | 23
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 25
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 670
-rw-r--r--  drivers/scsi/qedi/Makefile | 2
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 1068
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c | 781
-rw-r--r--  drivers/scsi/qedi/qedi_fw_iscsi.h | 117
-rw-r--r--  drivers/scsi/qedi/qedi_fw_scsi.h | 55
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 12
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_version.h | 4
-rw-r--r--  include/linux/brcmphy.h | 3
-rw-r--r--  include/linux/etherdevice.h | 15
-rw-r--r--  include/linux/ethtool.h | 2
-rw-r--r--  include/linux/hyperv.h | 96
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/qed/common_hsi.h | 30
-rw-r--r--  include/linux/qed/eth_common.h | 3
-rw-r--r--  include/linux/qed/fcoe_common.h | 180
-rw-r--r--  include/linux/qed/iscsi_common.h | 241
-rw-r--r--  include/linux/qed/qed_if.h | 48
-rw-r--r--  include/linux/qed/rdma_common.h | 3
-rw-r--r--  include/linux/qed/roce_common.h | 17
-rw-r--r--  include/linux/qed/storage_common.h | 30
-rw-r--r--  include/linux/qed/tcp_common.h | 1
-rw-r--r--  include/linux/stmmac.h | 36
-rw-r--r--  include/net/dsa.h | 5
-rw-r--r--  include/net/ip_fib.h | 12
-rw-r--r--  include/net/mpls_iptunnel.h | 2
-rw-r--r--  include/net/netns/mpls.h | 3
-rw-r--r--  include/net/pkt_sched.h | 2
-rw-r--r--  include/net/sch_generic.h | 1
-rw-r--r--  include/net/sctp/sm.h | 16
-rw-r--r--  include/net/sctp/ulpevent.h | 8
-rw-r--r--  include/net/secure_seq.h | 6
-rw-r--r--  include/net/sock.h | 7
-rw-r--r--  include/net/tc_act/tc_vlan.h | 5
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/uapi/linux/ipv6.h | 1
-rw-r--r--  include/uapi/linux/mpls_iptunnel.h | 2
-rw-r--r--  include/uapi/linux/rtnetlink.h | 2
-rw-r--r--  include/uapi/linux/sctp.h | 31
-rw-r--r--  net/atm/common.c | 22
-rw-r--r--  net/core/drop_monitor.c | 5
-rw-r--r--  net/core/ethtool.c | 1
-rw-r--r--  net/core/flow_dissector.c | 426
-rw-r--r--  net/core/lwtunnel.c | 2
-rw-r--r--  net/core/secure_seq.c | 13
-rw-r--r--  net/core/sock.c | 91
-rw-r--r--  net/decnet/af_decnet.c | 13
-rw-r--r--  net/ipv4/Makefile | 2
-rw-r--r--  net/ipv4/devinet.c | 32
-rw-r--r--  net/ipv4/fib_notifier.c | 86
-rw-r--r--  net/ipv4/fib_rules.c | 8
-rw-r--r--  net/ipv4/fib_trie.c | 108
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 22
-rw-r--r--  net/ipv6/addrconf.c | 114
-rw-r--r--  net/ipv6/tcp_ipv6.c | 22
-rw-r--r--  net/ipv6/udp.c | 59
-rw-r--r--  net/mpls/af_mpls.c | 98
-rw-r--r--  net/mpls/internal.h | 7
-rw-r--r--  net/mpls/mpls_iptunnel.c | 73
-rw-r--r--  net/rds/ib_cm.c | 5
-rw-r--r--  net/rds/ib_fmr.c | 38
-rw-r--r--  net/rds/ib_mr.h | 2
-rw-r--r--  net/sched/sch_api.c | 42
-rw-r--r--  net/sched/sch_cbq.c | 5
-rw-r--r--  net/sched/sch_drr.c | 2
-rw-r--r--  net/sched/sch_dsmark.c | 2
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_hfsc.c | 4
-rw-r--r--  net/sched/sch_htb.c | 2
-rw-r--r--  net/sched/sch_mq.c | 2
-rw-r--r--  net/sched/sch_mqprio.c | 2
-rw-r--r--  net/sched/sch_multiq.c | 2
-rw-r--r--  net/sched/sch_prio.c | 5
-rw-r--r--  net/sched/sch_qfq.c | 2
-rw-r--r--  net/sched/sch_red.c | 2
-rw-r--r--  net/sched/sch_sfb.c | 2
-rw-r--r--  net/sched/sch_tbf.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 15
-rw-r--r--  net/sctp/socket.c | 81
-rw-r--r--  net/sctp/stream.c | 396
-rw-r--r--  net/sctp/sysctl.c | 7
-rw-r--r--  net/sctp/ulpevent.c | 56
-rwxr-xr-x  tools/hv/bondvf.sh | 18
265 files changed, 22791 insertions, 7338 deletions
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 10587bdadbbe..26c77d985faf 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
@@ -2,11 +2,14 @@
Required properties:
- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
- "brcm,genet-v3", "brcm,genet-v4".
+ "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5".
- reg: address and length of the register set for the device
-- interrupts: must be two cells, the first cell is the general purpose
- interrupt line, while the second cell is the interrupt for the ring
- RX and TX queues operating in ring mode
+- interrupts and/or interrupts-extended: must be two cells, the first cell
+ is the general purpose interrupt line, while the second cell is the
+ interrupt for the ring RX and TX queues operating in ring mode. An
+ optional third interrupt cell for Wake-on-LAN can be specified.
+ See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for information on the property specifics.
- phy-mode: see ethernet.txt file in the same directory
- #address-cells: should be 1
- #size-cells: should be 1
@@ -29,15 +32,15 @@ Optional properties:
Required child nodes:
-- mdio bus node: this node should always be present regarless of the PHY
+- mdio bus node: this node should always be present regardless of the PHY
configuration of the GENET instance
MDIO bus node required properties:
- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
- "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the
- parent node compatible property (e.g: brcm,genet-v4 pairs with
- brcm,genet-mdio-v4)
+ "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5", the version
+ has to match the parent node compatible property (e.g: brcm,genet-v4 pairs
+ with brcm,genet-mdio-v4)
- reg: address and length relative to the parent node base register address
- #address-cells: address cell for MDIO bus addressing, should be 1
- #size-cells: size of the cells for MDIO bus addressing, should be 0
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
index ab0bb4247d14..4648948f7c3b 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
@@ -2,8 +2,9 @@
Required properties:
- compatible: should one from "brcm,genet-mdio-v1", "brcm,genet-mdio-v2",
- "brcm,genet-mdio-v3", "brcm,genet-mdio-v4" or "brcm,unimac-mdio"
-- reg: address and length of the regsiter set for the device, first one is the
+ "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5" or
+ "brcm,unimac-mdio"
+- reg: address and length of the register set for the device, first one is the
base register, and the second one is optional and for indirect accesses to
larger than 16-bits MDIO transactions
- reg-names: name(s) of the register must be "mdio" and optional "mdio_indir_rw"
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 4754364df4c6..6b4956beff8c 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -1,17 +1,28 @@
-* Marvell Armada 375 Ethernet Controller (PPv2)
+* Marvell Armada 375 Ethernet Controller (PPv2.1)
+ Marvell Armada 7K/8K Ethernet Controller (PPv2.2)
Required properties:
-- compatible: should be "marvell,armada-375-pp2"
+- compatible: should be one of:
+ "marvell,armada-375-pp2"
+ "marvell,armada-7k-pp2"
- reg: addresses and length of the register sets for the device.
- Must contain the following register sets:
+ For "marvell,armada-375-pp2", must contain the following register
+ sets:
- common controller registers
- LMS registers
- In addition, at least one port register set is required.
-- clocks: a pointer to the reference clocks for this device, consequently:
- - main controller clock
- - GOP clock
-- clock-names: names of used clocks, must be "pp_clk" and "gop_clk".
+ - one register area per Ethernet port
+ For "marvell,armada-7k-pp2", must contain the following register
+ sets:
+ - packet processor registers
+ - networking interfaces registers
+
+- clocks: pointers to the reference clocks for this device, consequently:
+ - main controller clock (for both armada-375-pp2 and armada-7k-pp2)
+ - GOP clock (for both armada-375-pp2 and armada-7k-pp2)
+ - MG clock (only for armada-7k-pp2)
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
+ "mg_clk" (the latter only for armada-7k-pp2).
The ethernet ports are represented by subnodes. At least one port is
required.
@@ -19,8 +30,10 @@ required.
Required properties (port):
- interrupts: interrupt for the port
-- port-id: should be '0' or '1' for ethernet ports, and '2' for the
- loopback port
+- port-id: ID of the port from the MAC point of view
+- gop-port-id: only for marvell,armada-7k-pp2, ID of the port from the
+ GOP (Group Of Ports) point of view. This ID is used to index the
+ per-port registers in the second register area.
- phy-mode: See ethernet.txt file in the same directory
Optional properties (port):
@@ -29,7 +42,7 @@ Optional properties (port):
- phy: a phandle to a phy node defining the PHY address (as the reg
property, a single integer).
-Example:
+Example for marvell,armada-375-pp2:
ethernet@f0000 {
compatible = "marvell,armada-375-pp2";
@@ -57,3 +70,30 @@ ethernet@f0000 {
phy-mode = "gmii";
};
};
+
+Example for marvell,armada-7k-pp2:
+
+cpm_ethernet: ethernet@0 {
+ compatible = "marvell,armada-7k-pp22";
+ reg = <0x0 0x100000>, <0x129000 0xb000>;
+ clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
+ clock-names = "pp_clk", "gop_clk", "gp_clk";
+
+ eth0: eth0 {
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <0>;
+ gop-port-id = <0>;
+ };
+
+ eth1: eth1 {
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <1>;
+ gop-port-id = <2>;
+ };
+
+ eth2: eth2 {
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <2>;
+ gop-port-id = <3>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index d3bfc2b30fb5..a7b0e41cb264 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -28,9 +28,9 @@ Optional properties:
clocks may be specified in derived bindings.
- clock-names: One name for each entry in the clocks property, the
first one should be "stmmaceth" and the second one should be "pclk".
-- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
- available this clock is used for programming the Timestamp Addend Register.
- If not passed then the system clock will be used and this is fine on some
+- ptp_ref: this is the PTP reference clock; in case of the PTP is available
+ this clock is used for programming the Timestamp Addend Register. If not
+ passed then the system clock will be used and this is fine on some
platforms.
- tx-fifo-depth: See ethernet.txt file in the same directory
- rx-fifo-depth: See ethernet.txt file in the same directory
@@ -72,7 +72,35 @@ Optional properties:
- snps,mb: mixed-burst
- snps,rb: rebuild INCRx Burst
- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
-
+- Multiple RX Queues parameters: below the list of all the parameters to
+ configure the multiple RX queues:
+ - snps,rx-queues-to-use: number of RX queues to be used in the driver
+ - Choose one of these RX scheduling algorithms:
+ - snps,rx-sched-sp: Strict priority
+ - snps,rx-sched-wsp: Weighted Strict priority
+ - For each RX queue
+ - Choose one of these modes:
+ - snps,dcb-algorithm: Queue to be enabled as DCB
+ - snps,avb-algorithm: Queue to be enabled as AVB
+ - snps,map-to-dma-channel: Channel to map
+- Multiple TX Queues parameters: below the list of all the parameters to
+ configure the multiple TX queues:
+ - snps,tx-queues-to-use: number of TX queues to be used in the driver
+ - Choose one of these TX scheduling algorithms:
+ - snps,tx-sched-wrr: Weighted Round Robin
+ - snps,tx-sched-wfq: Weighted Fair Queuing
+ - snps,tx-sched-dwrr: Deficit Weighted Round Robin
+ - snps,tx-sched-sp: Strict priority
+ - For each TX queue
+ - snps,weight: TX queue weight (if using a DCB weight algorithm)
+ - Choose one of these modes:
+ - snps,dcb-algorithm: TX queue will be working in DCB
+ - snps,avb-algorithm: TX queue will be working in AVB
+ - Configure Credit Base Shaper (if AVB Mode selected):
+ - snps,send_slope: enable Low Power Interface
+ - snps,idle_slope: unlock on WoL
+ - snps,high_credit: max write outstanding req. limit
+ - snps,low_credit: max read outstanding req. limit
Examples:
stmmac_axi_setup: stmmac-axi-config {
@@ -81,6 +109,32 @@ Examples:
snps,blen = <256 128 64 32 0 0 0>;
};
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <1>;
+ snps,rx-sched-sp;
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <2>;
+ snps,tx-sched-wrr;
+ queue0 {
+ snps,weight = <0x10>;
+ snps,dcb-algorithm;
+ };
+
+ queue1 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3E800>;
+ snps,low_credit = <0xFFC18000>;
+ };
+ };
+
gmac0: ethernet@e0800000 {
compatible = "st,spear600-gmac";
reg = <0xe0800000 0x8000>;
@@ -104,4 +158,6 @@ Examples:
phy1: ethernet-phy@0 {
};
};
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
};
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 15d8d16934fd..2f24a1912a48 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -19,6 +19,25 @@ platform_labels - INTEGER
Possible values: 0 - 1048575
Default: 0
+ip_ttl_propagate - BOOL
+ Control whether TTL is propagated from the IPv4/IPv6 header to
+ the MPLS header on imposing labels and propagated from the
+ MPLS header to the IPv4/IPv6 header on popping the last label.
+
+ If disabled, the MPLS transport network will appear as a
+ single hop to transit traffic.
+
+ 0 - disabled / RFC 3443 [Short] Pipe Model
+ 1 - enabled / RFC 3443 Uniform Model (default)
+
+default_ttl - BOOL
+ Default TTL value to use for MPLS packets where it cannot be
+ propagated from an IP header, either because one isn't present
+ or ip_ttl_propagate has been disabled.
+
+ Possible values: 1 - 255
+ Default: 255
+
conf/<interface>/input - BOOL
Control whether packets can be input on this interface.
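
The two sysctls documented above live under /proc/sys/net/mpls/ alongside
platform_labels. As a rough userspace sketch (not part of this patch; the
paths and values follow the documentation text above), switching a router
to the RFC 3443 [Short] Pipe Model could look like this:

/* Illustrative sketch only: clear ip_ttl_propagate and pick a fixed
 * default_ttl via the sysctl files documented above. Error handling
 * is minimal.
 */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        if (write_sysctl("/proc/sys/net/mpls/ip_ttl_propagate", "0"))
                return 1;
        if (write_sysctl("/proc/sys/net/mpls/default_ttl", "64"))
                return 1;
        return 0;
}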
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f67a9..cefda30ed704 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -902,6 +902,12 @@ F: drivers/net/phy/mdio-xgene.c
F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
+APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
+M: Iyappan Subramanian <isubramanian@apm.com>
+M: Keyur Chudgar <kchudgar@apm.com>
+S: Supported
+F: drivers/net/ethernet/apm/xgene-v2/
+
APPLIED MICRO (APM) X-GENE SOC PMU
M: Tai Nguyen <ttnguyen@apm.com>
S: Supported
@@ -11061,6 +11067,12 @@ F: include/linux/dma/dw.h
F: include/linux/platform_data/dma-dw.h
F: drivers/dma/dw/
+SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
+M: Jie Deng <jiedeng@synopsys.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/synopsys/
+
SYNOPSYS DESIGNWARE I2C DRIVER
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 4a610795b585..906705e5f776 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2267,9 +2267,8 @@ static int amb_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
- init_timer(&dev->housekeeping);
- dev->housekeeping.function = do_housekeeping;
- dev->housekeeping.data = (unsigned long) dev;
+ setup_timer(&dev->housekeeping, do_housekeeping,
+ (unsigned long)dev);
mod_timer(&dev->housekeeping, jiffies);
// enable host interrupts
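
The init_timer() to setup_timer() conversion above follows a common cleanup
pattern in this series. A minimal sketch of that pattern, using a
hypothetical driver structure and callback with the pre-timer_setup() API
that this tree uses:

/* Illustrative only: my_dev and my_housekeeping are hypothetical. */
#include <linux/timer.h>

struct my_dev {
        struct timer_list housekeeping;
};

static void my_housekeeping(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *)data;

        /* ... periodic work ... */
        mod_timer(&dev->housekeeping, jiffies + HZ);
}

static void my_dev_start_timer(struct my_dev *dev)
{
        /* replaces init_timer() plus manual .function/.data assignment */
        setup_timer(&dev->housekeeping, my_housekeeping,
                    (unsigned long)dev);
        mod_timer(&dev->housekeeping, jiffies);
}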
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 87799e81af97..c3f1a9e33cef 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,6 +32,8 @@
#include "hyperv_vmbus.h"
+#define VMBUS_PKT_TRAILER 8
+
/*
* When we write to the ring buffer, check if the host needs to
* be signaled. Here is the details of this protocol:
@@ -336,6 +338,12 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
return 0;
}
+static inline void
+init_cached_read_index(struct hv_ring_buffer_info *rbi)
+{
+ rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw)
@@ -366,7 +374,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return ret;
}
- init_cached_read_index(channel);
+ init_cached_read_index(inring_info);
+
next_read_location = hv_get_next_read_location(inring_info);
next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
sizeof(desc),
@@ -410,3 +419,86 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return ret;
}
+
+/*
+ * Determine number of bytes available in ring buffer after
+ * the current iterator (priv_read_index) location.
+ *
+ * This is similar to hv_get_bytes_to_read but with private
+ * read index instead.
+ */
+static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
+{
+ u32 priv_read_loc = rbi->priv_read_index;
+ u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+
+ if (write_loc >= priv_read_loc)
+ return write_loc - priv_read_loc;
+ else
+ return (rbi->ring_datasize - priv_read_loc) + write_loc;
+}
+
+/*
+ * Get first vmbus packet from ring buffer after read_index
+ *
+ * If ring buffer is empty, returns NULL and no other action needed.
+ */
+struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ /* set state for later hv_signal_on_read() */
+ init_cached_read_index(rbi);
+
+ if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+ return NULL;
+
+ return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
+
+/*
+ * Get next vmbus packet from ring buffer.
+ *
+ * Advances the current location (priv_read_index) and checks for more
+ * data. If the end of the ring buffer is reached, then return NULL.
+ */
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *desc)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 packetlen = desc->len8 << 3;
+ u32 dsize = rbi->ring_datasize;
+
+ /* bump offset to next potential packet */
+ rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+ if (rbi->priv_read_index >= dsize)
+ rbi->priv_read_index -= dsize;
+
+ /* more data? */
+ if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+ return NULL;
+ else
+ return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+}
+EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
+
+/*
+ * Update host ring buffer after iterating over packets.
+ */
+void hv_pkt_iter_close(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ /*
+ * Make sure all reads are done before we update the read index since
+ * the writer may start writing to the read area once the read index
+ * is updated.
+ */
+ virt_rmb();
+ rbi->ring_buffer->read_index = rbi->priv_read_index;
+
+ hv_signal_on_read(channel);
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
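
For context, a minimal sketch (not part of this patch) of how a VMBus
driver could drain its inbound ring with the iterator API added above. The
draining function is hypothetical; the offset8/len8 arithmetic follows
struct vmpacket_descriptor, and the prototypes are assumed to be exposed
via include/linux/hyperv.h, which this series also updates.

/* Illustrative only: consume all pending inbound packets, then publish
 * the read index and signal the host via hv_pkt_iter_close().
 */
#include <linux/hyperv.h>

static void example_drain_inbound(struct vmbus_channel *channel)
{
        struct vmpacket_descriptor *desc;

        for (desc = hv_pkt_iter_first(channel);
             desc;
             desc = __hv_pkt_iter_next(channel, desc)) {
                /* offset8 and len8 are in units of 8 bytes */
                void *data = (u8 *)desc + (desc->offset8 << 3);
                u32 datalen = (desc->len8 - desc->offset8) << 3;

                pr_debug("pkt type %u, %u payload bytes at %p\n",
                         desc->type, datalen, data);
        }

        hv_pkt_iter_close(channel);
}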
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index b9b47e5cc8b3..ced0461d6e9f 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -587,9 +587,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
#define EVENT_TYPE_CQ 1
#define EVENT_TYPE_QP 2
struct qedr_dev *dev = (struct qedr_dev *)context;
- union event_ring_data *data = fw_handle;
- u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
- data->roce_handle.lo;
+ struct regpair *async_handle = (struct regpair *)fw_handle;
+ u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
u8 event_type = EVENT_TYPE_NOT_DEFINED;
struct ib_event event;
struct ib_cq *ibcq;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index bb32e4792ec9..5cb9195513bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -38,7 +38,8 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_roce_if.h>
#include <linux/qed/qede_roce.h>
-#include "qedr_hsi.h"
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
#define QEDR_MODULE_VERSION "8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 699632893dd9..a6280ce3e2a5 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -43,14 +43,11 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "qedr_hsi.h"
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_roce_if.h>
#include "qedr.h"
-#include "qedr_hsi.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
-#include "qedr_hsi.h"
#include "qedr_cm.h"
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
deleted file mode 100644
index 66d27521373f..000000000000
--- a/drivers/infiniband/hw/qedr/qedr_hsi.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __QED_HSI_ROCE__
-#define __QED_HSI_ROCE__
-
-#include <linux/qed/common_hsi.h>
-#include <linux/qed/roce_common.h>
-#include "qedr_hsi_rdma.h"
-
-/* Affiliated asynchronous events / errors enumeration */
-enum roce_async_events_type {
- ROCE_ASYNC_EVENT_NONE = 0,
- ROCE_ASYNC_EVENT_COMM_EST = 1,
- ROCE_ASYNC_EVENT_SQ_DRAINED,
- ROCE_ASYNC_EVENT_SRQ_LIMIT,
- ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
- ROCE_ASYNC_EVENT_CQ_ERR,
- ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
- ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
- ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
- ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
- ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
- ROCE_ASYNC_EVENT_SRQ_EMPTY,
- MAX_ROCE_ASYNC_EVENTS_TYPE
-};
-
-#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6b3bb32803bd..2091902848e6 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -43,7 +43,8 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "qedr_hsi.h"
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index edc70ffad660..431926bba9f4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1052,8 +1052,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_state = AD_RX_INITIALIZE;
port->sm_vars |= AD_PORT_CHURNED;
/* check if port is not enabled */
- } else if (!(port->sm_vars & AD_PORT_BEGIN)
- && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
+ } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled)
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* check if new lacpdu arrived */
else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
@@ -1081,11 +1080,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* if no lacpdu arrived and no timer is on */
switch (port->sm_rx_state) {
case AD_RX_PORT_DISABLED:
- if (port->sm_vars & AD_PORT_MOVED)
- port->sm_rx_state = AD_RX_INITIALIZE;
- else if (port->is_enabled
- && (port->sm_vars
- & AD_PORT_LACP_ENABLED))
+ if (port->is_enabled &&
+ (port->sm_vars & AD_PORT_LACP_ENABLED))
port->sm_rx_state = AD_RX_EXPIRED;
else if (port->is_enabled
&& ((port->sm_vars
@@ -1115,7 +1111,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
- port->sm_vars &= ~AD_PORT_MOVED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* Fall Through */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8a4ba8b88e52..ba934020dfaa 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -201,12 +201,6 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
-static __be32 arp_target[BOND_MAX_ARP_TARGETS];
-static int arp_ip_count;
-static int bond_mode = BOND_MODE_ROUNDROBIN;
-static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
-static int lacp_fast;
-
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -2575,10 +2569,8 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
* arp is transmitted to generate traffic. see activebackup_arp_monitor for
* arp monitoring in active backup mode.
*/
-static void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct bonding *bond)
{
- struct bonding *bond = container_of(work, struct bonding,
- arp_work.work);
struct slave *slave, *oldcurrent;
struct list_head *iter;
int do_failover = 0, slave_state_changed = 0;
@@ -2916,10 +2908,8 @@ check_state:
return should_notify_rtnl;
}
-static void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct bonding *bond)
{
- struct bonding *bond = container_of(work, struct bonding,
- arp_work.work);
bool should_notify_peers = false;
bool should_notify_rtnl = false;
int delta_in_ticks;
@@ -2972,6 +2962,17 @@ re_arm:
}
}
+static void bond_arp_monitor(struct work_struct *work)
+{
+ struct bonding *bond = container_of(work, struct bonding,
+ arp_work.work);
+
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+ bond_activebackup_arp_mon(bond);
+ else
+ bond_loadbalance_arp_mon(bond);
+}
+
/*-------------------------- netdev event handling --------------------------*/
/* Change device name */
@@ -3228,10 +3229,7 @@ static void bond_work_init_all(struct bonding *bond)
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
- if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
- INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
- else
- INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+ INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
@@ -3266,8 +3264,6 @@ static int bond_open(struct net_device *bond_dev)
}
}
- bond_work_init_all(bond);
-
if (bond_is_lb(bond)) {
/* bond_alb_initialize must be called before the timer
* is started.
@@ -4252,6 +4248,12 @@ static int bond_check_params(struct bond_params *params)
int arp_all_targets_value;
u16 ad_actor_sys_prio = 0;
u16 ad_user_port_key = 0;
+ __be32 arp_target[BOND_MAX_ARP_TARGETS];
+ int arp_ip_count;
+ int bond_mode = BOND_MODE_ROUNDROBIN;
+ int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
+ int lacp_fast = 0;
+ int tlb_dynamic_lb = 0;
/* Convert string parameters. */
if (mode) {
@@ -4564,6 +4566,17 @@ static int bond_check_params(struct bond_params *params)
}
ad_user_port_key = valptr->value;
+ if (bond_mode == BOND_MODE_TLB) {
+ bond_opt_initstr(&newval, "default");
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: No tlb_dynamic_lb default value");
+ return -EINVAL;
+ }
+ tlb_dynamic_lb = valptr->value;
+ }
+
if (lp_interval == 0) {
pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4591,7 +4604,7 @@ static int bond_check_params(struct bond_params *params)
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
- params->tlb_dynamic_lb = 1; /* Default value */
+ params->tlb_dynamic_lb = tlb_dynamic_lb;
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
@@ -4687,6 +4700,8 @@ int bond_create(struct net *net, const char *name)
netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
+
rtnl_unlock();
if (res < 0)
bond_destructor(bond_dev);
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index c36be318de1a..31d37a90cec7 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
mv88e6xxx-objs += global1.o
+mv88e6xxx-objs += global1_atu.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
mv88e6xxx-objs += port.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 03dc886ed3d6..3354f99df378 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -687,11 +687,6 @@ static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
return chip->info->family == MV88E6XXX_FAMILY_6165;
}
-static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6320;
-}
-
static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
{
return chip->info->family == MV88E6XXX_FAMILY_6341;
@@ -1066,11 +1061,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
-{
- return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
-}
-
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
@@ -1130,122 +1120,6 @@ out:
return err;
}
-static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
-{
- u16 val;
- int err;
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
- if (err)
- return err;
- } else if (mv88e6xxx_num_databases(chip) == 256) {
- /* ATU DBNum[7:4] are located in ATU Control 15:12 */
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
- (val & 0xfff) | ((fid << 8) & 0xf000));
- if (err)
- return err;
-
- /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
- cmd |= fid & 0xf;
- }
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
- if (err)
- return err;
-
- return _mv88e6xxx_atu_wait(chip);
-}
-
-static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_atu_entry *entry)
-{
- u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
-
- if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- unsigned int mask, shift;
-
- if (entry->trunk) {
- data |= GLOBAL_ATU_DATA_TRUNK;
- mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
- shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
- } else {
- mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
- shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
- }
-
- data |= (entry->portv_trunkid << shift) & mask;
- }
-
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
-}
-
-static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_atu_entry *entry,
- bool static_too)
-{
- int op;
- int err;
-
- err = _mv88e6xxx_atu_wait(chip);
- if (err)
- return err;
-
- err = _mv88e6xxx_atu_data_write(chip, entry);
- if (err)
- return err;
-
- if (entry->fid) {
- op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
- GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
- } else {
- op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
- GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
- }
-
- return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
-}
-
-static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
- u16 fid, bool static_too)
-{
- struct mv88e6xxx_atu_entry entry = {
- .fid = fid,
- .state = 0, /* EntryState bits must be 0 */
- };
-
- return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
-}
-
-static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
- int from_port, int to_port, bool static_too)
-{
- struct mv88e6xxx_atu_entry entry = {
- .trunk = false,
- .fid = fid,
- };
-
- /* EntryState bits must be 0xF */
- entry.state = GLOBAL_ATU_DATA_STATE_MASK;
-
- /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
- entry.portv_trunkid = (to_port & 0x0f) << 4;
- entry.portv_trunkid |= from_port & 0x0f;
-
- return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
-}
-
-static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
- int port, bool static_too)
-{
- /* Destination port 0xF means remove the entries */
- return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
-}
-
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
@@ -1306,13 +1180,28 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
netdev_err(ds->ports[port].netdev, "failed to update state\n");
}
+static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_flush(chip, 0, true);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
+}
+
static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_atu_remove(chip, 0, port, false);
+ err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
mutex_unlock(&chip->reg_lock);
if (err)
@@ -1662,7 +1551,7 @@ loadpurge:
return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_entry vlan;
@@ -1703,7 +1592,7 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
return -ENOSPC;
/* Clear the database */
- return _mv88e6xxx_atu_flush(chip, *fid, true);
+ return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
@@ -1716,7 +1605,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
};
int i, err;
- err = _mv88e6xxx_fid_new(chip, &vlan.fid);
+ err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
@@ -1964,7 +1853,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false);
+ return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
@@ -2001,96 +1890,6 @@ unlock:
return err;
}
-static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
- const unsigned char *addr)
-{
- int i, err;
-
- for (i = 0; i < 3; i++) {
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
- (addr[i * 2] << 8) | addr[i * 2 + 1]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
- unsigned char *addr)
-{
- u16 val;
- int i, err;
-
- for (i = 0; i < 3; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
- if (err)
- return err;
-
- addr[i * 2] = val >> 8;
- addr[i * 2 + 1] = val & 0xff;
- }
-
- return 0;
-}
-
-static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_atu_entry *entry)
-{
- int ret;
-
- ret = _mv88e6xxx_atu_wait(chip);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_atu_data_write(chip, entry);
- if (ret < 0)
- return ret;
-
- return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
-}
-
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
- struct mv88e6xxx_atu_entry *entry);
-
-static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
- const u8 *addr, struct mv88e6xxx_atu_entry *entry)
-{
- struct mv88e6xxx_atu_entry next;
- int err;
-
- memcpy(next.mac, addr, ETH_ALEN);
- eth_addr_dec(next.mac);
-
- err = _mv88e6xxx_atu_mac_write(chip, next.mac);
- if (err)
- return err;
-
- do {
- err = _mv88e6xxx_atu_getnext(chip, fid, &next);
- if (err)
- return err;
-
- if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
- break;
-
- if (ether_addr_equal(next.mac, addr)) {
- *entry = next;
- return 0;
- }
- } while (ether_addr_greater(addr, next.mac));
-
- memset(entry, 0, sizeof(*entry));
- entry->fid = fid;
- ether_addr_copy(entry->mac, addr);
-
- return 0;
-}
-
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
@@ -2107,21 +1906,32 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry);
+ entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ ether_addr_copy(entry.mac, addr);
+ eth_addr_dec(entry.mac);
+
+ err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
if (err)
return err;
+ /* Initialize a fresh ATU entry if it isn't found */
+ if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED ||
+ !ether_addr_equal(entry.mac, addr)) {
+ memset(&entry, 0, sizeof(entry));
+ ether_addr_copy(entry.mac, addr);
+ }
+
/* Purge the ATU entry only if no port is using it anymore */
if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
- entry.portv_trunkid &= ~BIT(port);
- if (!entry.portv_trunkid)
+ entry.portvec &= ~BIT(port);
+ if (!entry.portvec)
entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
} else {
- entry.portv_trunkid |= BIT(port);
+ entry.portvec |= BIT(port);
entry.state = state;
}
- return _mv88e6xxx_atu_load(chip, &entry);
+ return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
}
static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
@@ -2161,75 +1971,26 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
return err;
}
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
- struct mv88e6xxx_atu_entry *entry)
-{
- struct mv88e6xxx_atu_entry next = { 0 };
- u16 val;
- int err;
-
- next.fid = fid;
-
- err = _mv88e6xxx_atu_wait(chip);
- if (err)
- return err;
-
- err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (err)
- return err;
-
- err = _mv88e6xxx_atu_mac_read(chip, next.mac);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
- if (err)
- return err;
-
- next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
- if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- unsigned int mask, shift;
-
- if (val & GLOBAL_ATU_DATA_TRUNK) {
- next.trunk = true;
- mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
- shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
- } else {
- next.trunk = false;
- mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
- shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
- }
-
- next.portv_trunkid = (val & mask) >> shift;
- }
-
- *entry = next;
- return 0;
-}
-
static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
u16 fid, u16 vid, int port,
struct switchdev_obj *obj,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_atu_entry addr = {
- .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- };
+ struct mv88e6xxx_atu_entry addr;
int err;
- err = _mv88e6xxx_atu_mac_write(chip, addr.mac);
- if (err)
- return err;
+ addr.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ eth_broadcast_addr(addr.mac);
do {
- err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
if (err)
return err;
if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
break;
- if (addr.trunk || (addr.portv_trunkid & BIT(port)) == 0)
+ if (addr.trunk || (addr.portvec & BIT(port)) == 0)
continue;
if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) {
@@ -2433,70 +2194,85 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
return err;
}
-static int mv88e6xxx_setup_port_dsa(struct mv88e6xxx_chip *chip, int port,
- int upstream_port)
+static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode frame, u16 egress,
+ u16 etype)
{
int err;
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_DSA);
+ if (!chip->info->ops->port_set_frame_mode)
+ return -EOPNOTSUPP;
+
+ err = mv88e6xxx_port_set_egress_mode(chip, port, egress);
+ if (err)
+ return err;
+
+ err = chip->info->ops->port_set_frame_mode(chip, port, frame);
if (err)
return err;
- return chip->info->ops->port_set_egress_unknowns(
- chip, port, port == upstream_port);
+ if (chip->info->ops->port_set_ether_type)
+ return chip->info->ops->port_set_ether_type(chip, port, etype);
+
+ return 0;
}
-static int mv88e6xxx_setup_port_cpu(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
{
- int err;
+ return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
+ PORT_CONTROL_EGRESS_UNMODIFIED,
+ PORT_ETH_TYPE_DEFAULT);
+}
- switch (chip->info->tag_protocol) {
- case DSA_TAG_PROTO_EDSA:
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_ETHERTYPE);
- if (err)
- return err;
+static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
+ PORT_CONTROL_EGRESS_UNMODIFIED,
+ PORT_ETH_TYPE_DEFAULT);
+}
- err = mv88e6xxx_port_set_egress_mode(
- chip, port, PORT_CONTROL_EGRESS_ADD_TAG);
- if (err)
- return err;
+static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port,
+ MV88E6XXX_FRAME_MODE_ETHERTYPE,
+ PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA);
+}
- if (chip->info->ops->port_set_ether_type)
- err = chip->info->ops->port_set_ether_type(
- chip, port, ETH_P_EDSA);
- break;
+static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
+{
+ if (dsa_is_dsa_port(chip->ds, port))
+ return mv88e6xxx_set_port_mode_dsa(chip, port);
- case DSA_TAG_PROTO_DSA:
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_DSA);
- if (err)
- return err;
+ if (dsa_is_normal_port(chip->ds, port))
+ return mv88e6xxx_set_port_mode_normal(chip, port);
- err = mv88e6xxx_port_set_egress_mode(
- chip, port, PORT_CONTROL_EGRESS_UNMODIFIED);
- break;
- default:
- err = -EINVAL;
- }
+ /* Setup CPU port mode depending on its supported tag format */
+ if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA)
+ return mv88e6xxx_set_port_mode_dsa(chip, port);
- if (err)
- return err;
+ if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA)
+ return mv88e6xxx_set_port_mode_edsa(chip, port);
- return chip->info->ops->port_set_egress_unknowns(chip, port, true);
+ return -EINVAL;
}
-static int mv88e6xxx_setup_port_normal(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
{
- int err;
+ bool message = dsa_is_dsa_port(chip->ds, port);
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_NORMAL);
- if (err)
- return err;
+ return mv88e6xxx_port_set_message_port(chip, port, message);
+}
- return chip->info->ops->port_set_egress_unknowns(chip, port, false);
+static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
+{
+ bool flood = port == dsa_upstream_port(chip->ds);
+
+ /* Upstream ports flood frames with unknown unicast or multicast DA */
+ if (chip->info->ops->port_set_egress_floods)
+ return chip->info->ops->port_set_egress_floods(chip, port,
+ flood, flood);
+
+ return 0;
}
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
@@ -2541,14 +2317,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- if (dsa_is_cpu_port(ds, port)) {
- err = mv88e6xxx_setup_port_cpu(chip, port);
- } else if (dsa_is_dsa_port(ds, port)) {
- err = mv88e6xxx_setup_port_dsa(chip, port,
- dsa_upstream_port(ds));
- } else {
- err = mv88e6xxx_setup_port_normal(chip, port);
- }
+ err = mv88e6xxx_setup_port_mode(chip, port);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_egress_floods(chip, port);
if (err)
return err;
@@ -2623,20 +2396,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) {
- /* Port ATU control: disable limiting the number of
- * address database entries that this port is allowed
- * to use.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL,
- 0x0000);
- /* Priority Override: disable DA, SA and VTU priority
- * override.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
- 0x0000);
+ if (chip->info->ops->port_disable_learn_limit) {
+ err = chip->info->ops->port_disable_learn_limit(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_disable_pri_override) {
+ err = chip->info->ops->port_disable_pri_override(chip, port);
if (err)
return err;
}
@@ -2653,10 +2420,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- /* Port Control 1: disable trunking, disable sending
- * learning messages to this port.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
+ err = mv88e6xxx_setup_message_port(chip, port);
if (err)
return err;
@@ -2697,33 +2461,6 @@ static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
return 0;
}
-static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
- unsigned int msecs)
-{
- const unsigned int coeff = chip->info->age_time_coeff;
- const unsigned int min = 0x01 * coeff;
- const unsigned int max = 0xff * coeff;
- u8 age_time;
- u16 val;
- int err;
-
- if (msecs < min || msecs > max)
- return -ERANGE;
-
- /* Round to nearest multiple of coeff */
- age_time = (msecs + coeff / 2) / coeff;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
- if (err)
- return err;
-
- /* AgeTime is 11:4 bits */
- val &= ~0xff0;
- val |= age_time << 4;
-
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
-}
-
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -2731,7 +2468,7 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
int err;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_g1_set_age_time(chip, ageing_time);
+ err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
mutex_unlock(&chip->reg_lock);
return err;
@@ -2774,24 +2511,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
if (err < 0)
return err;
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARN2ALL);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_set_age_time(chip, 300000);
- if (err)
- return err;
-
- /* Clear all ATU entries */
- err = _mv88e6xxx_atu_flush(chip, 0, true);
- if (err)
- return err;
-
/* Configure the IP ToS mapping registers. */
err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
if (err)
@@ -2872,6 +2591,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
}
+ err = mv88e6xxx_atu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Some generations have the configuration of sending reserved
* management frames to the CPU in global2, others in
* global1. Hence it does not fit the two setup functions
@@ -3101,10 +2824,12 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3127,7 +2852,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3149,11 +2874,13 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3174,7 +2901,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3196,7 +2925,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_jumbo_config = mv88e6165_port_jumbo_config,
@@ -3225,11 +2954,13 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3249,6 +2980,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3271,11 +3004,13 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3300,11 +3035,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3327,11 +3064,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3356,11 +3095,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3381,7 +3122,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3410,9 +3151,11 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3438,9 +3181,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_set_speed = mv88e6390x_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3466,9 +3211,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3494,11 +3241,13 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3523,10 +3272,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3551,11 +3302,13 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
@@ -3578,11 +3331,13 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
@@ -3603,11 +3358,13 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3630,11 +3387,13 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3659,11 +3418,13 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3688,11 +3449,13 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
@@ -3717,11 +3480,13 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
@@ -3746,12 +3511,14 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6390_port_pause_config,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,11 +3544,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_speed = mv88e6390x_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3807,9 +3576,11 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3822,22 +3593,6 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
.reset = mv88e6352_g1_reset,
};
-static int mv88e6xxx_verify_madatory_ops(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_ops *ops)
-{
- if (!ops->port_set_frame_mode) {
- dev_err(chip->dev, "Missing port_set_frame_mode");
- return -EINVAL;
- }
-
- if (!ops->port_set_egress_unknowns) {
- dev_err(chip->dev, "Missing port_set_egress_mode");
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
@@ -3849,6 +3604,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
.ops = &mv88e6085_ops,
@@ -3864,6 +3620,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
.ops = &mv88e6095_ops,
@@ -3879,6 +3636,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
.ops = &mv88e6097_ops,
@@ -3894,6 +3652,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6123_ops,
@@ -3909,6 +3668,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
.ops = &mv88e6131_ops,
@@ -3924,6 +3684,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6161_ops,
@@ -3939,6 +3700,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6165_ops,
@@ -3954,6 +3716,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6171_ops,
@@ -3969,6 +3732,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6172_ops,
@@ -3984,6 +3748,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6175_ops,
@@ -3999,6 +3764,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6176_ops,
@@ -4014,6 +3780,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
.ops = &mv88e6185_ops,
@@ -4030,6 +3797,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.tag_protocol = DSA_TAG_PROTO_DSA,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6190_ops,
},
@@ -4044,6 +3812,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6190x_ops,
@@ -4059,6 +3828,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6391_ops,
@@ -4074,6 +3844,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6240_ops,
@@ -4089,6 +3860,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6290_ops,
@@ -4104,6 +3876,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
.ops = &mv88e6320_ops,
@@ -4119,6 +3892,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
.ops = &mv88e6321_ops,
@@ -4133,6 +3907,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6341,
.ops = &mv88e6141_ops,
@@ -4147,6 +3922,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6341,
.ops = &mv88e6341_ops,
@@ -4162,6 +3938,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6350_ops,
@@ -4177,6 +3954,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6351_ops,
@@ -4192,6 +3970,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6352_ops,
@@ -4206,6 +3985,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6390_ops,
@@ -4220,6 +4000,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6390x_ops,
@@ -4502,10 +4283,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
- err = mv88e6xxx_verify_madatory_ops(chip, chip->info->ops);
- if (err)
- return err;
-
err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1aec7382c02d..eece7418e67d 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -38,4 +38,15 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs);
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+ bool all);
+
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
new file mode 100644
index 000000000000..120b7f41a735
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -0,0 +1,300 @@
+/*
+ * Marvell 88E6xxx Address Translation Unit (ATU) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ * Copyright (c) 2017 Savoir-faire Linux, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+/* Offset 0x01: ATU FID Register */
+
+static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid & 0xfff);
+}
+
+/* Offset 0x0A: ATU Control Register */
+
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+ if (learn2all)
+ val |= GLOBAL_ATU_CONTROL_LEARN2ALL;
+ else
+ val &= ~GLOBAL_ATU_CONTROL_LEARN2ALL;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+}
+
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs)
+{
+ const unsigned int coeff = chip->info->age_time_coeff;
+ const unsigned int min = 0x01 * coeff;
+ const unsigned int max = 0xff * coeff;
+ u8 age_time;
+ u16 val;
+ int err;
+
+ if (msecs < min || msecs > max)
+ return -ERANGE;
+
+ /* Round to nearest multiple of coeff */
+ age_time = (msecs + coeff / 2) / coeff;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+ /* AgeTime is 11:4 bits */
+ val &= ~0xff0;
+ val |= age_time << 4;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+}
+
+/* Offset 0x0B: ATU Operation Register */
+
+static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
+}
+
+static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+{
+ u16 val;
+ int err;
+
+	/* FID bits are spread across several registers, depending on how many databases the chip supports */
+ if (mv88e6xxx_num_databases(chip) > 256) {
+ err = mv88e6xxx_g1_atu_fid_write(chip, fid);
+ if (err)
+ return err;
+ } else {
+ if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+ val = (val & 0x0fff) | ((fid << 8) & 0xf000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+ if (err)
+ return err;
+ }
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ op |= fid & 0xf;
+ }
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op_wait(chip);
+}
+
+/* Offset 0x0C: ATU Data Register */
+
+static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+ if (err)
+ return err;
+
+ entry->state = val & 0xf;
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ if (val & GLOBAL_ATU_DATA_TRUNK)
+ entry->trunk = true;
+
+ entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 data = entry->state & 0xf;
+
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ if (entry->trunk)
+ data |= GLOBAL_ATU_DATA_TRUNK;
+
+ data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
+ }
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
+}
+
+/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
+ * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
+ * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
+ */
+
+static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int i, err;
+
+ for (i = 0; i < 3; i++) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+ if (err)
+ return err;
+
+ entry->mac[i * 2] = val >> 8;
+ entry->mac[i * 2 + 1] = val & 0xff;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int i, err;
+
+ for (i = 0; i < 3; i++) {
+ val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Address Translation Unit operations */
+
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* Write the MAC address to iterate from only once */
+ if (entry->state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_mac_read(chip, entry);
+}
+
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_LOAD_DB);
+}
+
+static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry,
+ bool all)
+{
+ u16 op;
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ /* Flush/Move all or non-static entries from all or a given database */
+ if (all && fid)
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB;
+ else if (fid)
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
+ else if (all)
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL;
+ else
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
+
+ return mv88e6xxx_g1_atu_op(chip, fid, op);
+}
+
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all)
+{
+ struct mv88e6xxx_atu_entry entry = {
+ .state = 0, /* Null EntryState means Flush */
+ };
+
+ return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
+ int from_port, int to_port, bool all)
+{
+ struct mv88e6xxx_atu_entry entry = { 0 };
+ unsigned long mask;
+ int shift;
+
+ if (!chip->info->atu_move_port_mask)
+ return -EOPNOTSUPP;
+
+ mask = chip->info->atu_move_port_mask;
+ shift = bitmap_weight(&mask, 16);
+
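+	/* PortVec packs FromPort in its low bits and ToPort in the field just above */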
+	entry.state = 0xf; /* Full EntryState means Move */
+ entry.portvec = from_port & mask;
+ entry.portvec |= (to_port & mask) << shift;
+
+ return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+ bool all)
+{
+ int from_port = port;
+ int to_port = chip->info->atu_move_port_mask;
+
+ return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
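+	/* Moving entries to the all-ones "port" removes them from the database */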
+}
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 6033f2f6260a..75be2c339a49 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -132,18 +132,19 @@
#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
#define PORT_CONTROL_USE_IP BIT(5)
#define PORT_CONTROL_USE_TAG BIT(4)
-#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_DA (0x0 << 2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_MULTICAST_DA (0x1 << 2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_UNITCAST_DA (0x2 << 2)
-#define PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA (0x3 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_MASK (0x3 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA (0x0 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA (0x1 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA (0x2 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA (0x3 << 2)
#define PORT_CONTROL_STATE_MASK 0x03
#define PORT_CONTROL_STATE_DISABLED 0x00
#define PORT_CONTROL_STATE_BLOCKING 0x01
#define PORT_CONTROL_STATE_LEARNING 0x02
#define PORT_CONTROL_STATE_FORWARDING 0x03
#define PORT_CONTROL_1 0x05
+#define PORT_CONTROL_1_MESSAGE_PORT BIT(15)
#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
#define PORT_BASE_VLAN 0x06
#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
@@ -166,7 +167,6 @@
#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
#define PORT_CONTROL_2_MAP_DA BIT(7)
#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
-#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f
@@ -181,6 +181,7 @@
#define PORT_ATU_CONTROL 0x0c
#define PORT_PRI_OVERRIDE 0x0d
#define PORT_ETH_TYPE 0x0f
+#define PORT_ETH_TYPE_DEFAULT 0x9100
#define PORT_IN_DISCARD_LO 0x10
#define PORT_IN_DISCARD_HI 0x11
#define PORT_IN_FILTERED 0x12
@@ -551,7 +552,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
-#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
@@ -594,8 +594,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6097 \
- (MV88E6XXX_FLAG_G1_ATU_FID | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -608,8 +607,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_G1_ATU_FID | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -641,7 +639,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6341 \
(MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
@@ -654,8 +651,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_SERDES)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
- (MV88E6XXX_FLAG_G1_ATU_FID | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -669,7 +665,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6352 \
(MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
@@ -707,14 +702,18 @@ struct mv88e6xxx_info {
unsigned int g1_irqs;
enum dsa_tag_protocol tag_protocol;
unsigned long long flags;
+
+ /* Mask for FromPort and ToPort value of PortVec used in ATU Move
+ * operation. 0 means that the ATU Move operation is not supported.
+ */
+ u8 atu_move_port_mask;
const struct mv88e6xxx_ops *ops;
};
struct mv88e6xxx_atu_entry {
- u16 fid;
u8 state;
bool trunk;
- u16 portv_trunkid;
+ u16 portvec;
u8 mac[ETH_ALEN];
};
@@ -864,14 +863,16 @@ struct mv88e6xxx_ops {
int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
- int (*port_set_egress_unknowns)(struct mv88e6xxx_chip *chip, int port,
- bool on);
+ int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
/* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc.
* Some chips allow this to be configured on specific ports.
@@ -944,6 +945,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
return chip->info->num_ports;
}
+static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
+{
+ return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 8875784c4718..d4868bb50ed5 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -497,8 +497,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
}
-int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on)
+static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+ int port, bool unicast)
{
int err;
u16 reg;
@@ -507,7 +507,7 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (on)
+ if (unicast)
reg |= PORT_CONTROL_FORWARD_UNKNOWN;
else
reg &= ~PORT_CONTROL_FORWARD_UNKNOWN;
@@ -515,8 +515,8 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
}
-int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on)
+int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast)
{
int err;
u16 reg;
@@ -525,21 +525,45 @@ int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (on)
- reg |= PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+ reg &= ~PORT_CONTROL_EGRESS_FLOODS_MASK;
+
+ if (unicast && multicast)
+ reg |= PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA;
+ else if (unicast)
+ reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
+ else if (multicast)
+ reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
else
- reg &= ~PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+ reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA;
return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
}
/* Offset 0x05: Port Control 1 */
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &val);
+ if (err)
+ return err;
+
+ if (message_port)
+ val |= PORT_CONTROL_1_MESSAGE_PORT;
+ else
+ val &= ~PORT_CONTROL_1_MESSAGE_PORT;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, val);
+}
+
/* Offset 0x06: Port Based VLAN Map */
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
{
- const u16 mask = GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+ const u16 mask = mv88e6xxx_port_mask(chip);
u16 reg;
int err;
@@ -672,8 +696,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};
-int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on)
+static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+ int port, bool multicast)
{
int err;
u16 reg;
@@ -682,14 +706,26 @@ int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (on)
- reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+ if (multicast)
+ reg |= PORT_CONTROL_2_DEFAULT_FORWARD;
else
- reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN;
+ reg &= ~PORT_CONTROL_2_DEFAULT_FORWARD;
return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
}
+int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast)
+{
+ int err;
+
+ err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
+ if (err)
+ return err;
+
+ return mv88e6185_port_set_default_forward(chip, port, multicast);
+}
+
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port)
{
@@ -769,6 +805,20 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001);
}
+/* Offset 0x0C: Port ATU Control */
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0);
+}
+
+/* Offset 0x0D: (Priority) Override Register */
+
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0);
+}
+
/* Offset 0x0f: Port Ether type */
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index c83cbb3f4491..c2425ddab287 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -56,14 +56,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
-int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on);
-int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on);
-int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on);
+int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
+int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype);
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port);
int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
@@ -75,4 +75,8 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
+
#endif /* _MV88E6XXX_PORT_H */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 8c08f9deef92..edae15ac0e98 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -180,5 +180,6 @@ source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 26dce5bf2c18..bf7f4502cabc 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
index ec63d706d464..59efe5b145dd 100644
--- a/drivers/net/ethernet/apm/Kconfig
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -1 +1,2 @@
source "drivers/net/ethernet/apm/xgene/Kconfig"
+source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
index 65ce32ad1b2c..946b2a4c882d 100644
--- a/drivers/net/ethernet/apm/Makefile
+++ b/drivers/net/ethernet/apm/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_NET_XGENE) += xgene/
+obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644
index 000000000000..1205861b6318
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -0,0 +1,11 @@
+config NET_XGENE_V2
+ tristate "APM X-Gene SoC Ethernet-v2 Driver"
+ depends on HAS_DMA
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+	  This is the Ethernet driver for the on-chip Ethernet interface
+	  on APM X-Gene SoCs, which uses a linked-list DMA descriptor
+	  architecture (v2).
+
+ To compile this driver as a module, choose M here. This module will
+ be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644
index 000000000000..735309c0b8b1
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for APM X-Gene Ethernet v2 driver
+#
+
+xgene-enet-v2-objs := main.o mac.o enet.o ring.o
+obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c
new file mode 100644
index 000000000000..b49edeeb6275
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.c
@@ -0,0 +1,71 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->resources.base_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
+{
+ void __iomem *addr = pdata->resources.base_addr + offset;
+
+ return ioread32(addr);
+}
+
+int xge_port_reset(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_wr_csr(pdata, ENET_SRST, 0x3);
+ xge_wr_csr(pdata, ENET_SRST, 0x2);
+ xge_wr_csr(pdata, ENET_SRST, 0x0);
+
+ xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);
+
+ return 0;
+}
+
+static void xge_traffic_resume(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
+ xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);
+
+ xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
+ xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
+}
+
+int xge_port_init(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ pdata->phy_speed = SPEED_1000;
+ xge_mac_init(pdata);
+ xge_traffic_resume(ndev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h
new file mode 100644
index 000000000000..40371cfcfce4
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.h
@@ -0,0 +1,43 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ENET_H__
+#define __XGENE_ENET_V2_ENET_H__
+
+#define ENET_CLKEN 0xc008
+#define ENET_SRST 0xc000
+#define ENET_SHIM 0xc010
+#define CFG_MEM_RAM_SHUTDOWN 0xd070
+#define BLOCK_MEM_RDY 0xd074
+
+#define DEVM_ARAUX_COH BIT(19)
+#define DEVM_AWAUX_COH BIT(3)
+
+#define CFG_FORCE_LINK_STATUS_EN 0x229c
+#define FORCE_LINK_STATUS 0x22a0
+#define CFG_LINK_AGGR_RESUME 0x27c8
+#define RX_DV_GATE_REG 0x2dfc
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val);
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset);
+int xge_port_reset(struct net_device *ndev);
+
+#endif  /* __XGENE_ENET_V2_ENET_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
new file mode 100644
index 000000000000..c3189de3df55
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -0,0 +1,116 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_mac_reset(struct xge_pdata *pdata)
+{
+ xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET);
+ xge_wr_csr(pdata, MAC_CONFIG_1, 0);
+}
+
+static void xge_mac_set_speed(struct xge_pdata *pdata)
+{
+ u32 icm0, icm2, ecm0, mc2;
+ u32 intf_ctrl, rgmii;
+
+ icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0);
+ icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0);
+ ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0);
+ rgmii = xge_rd_csr(pdata, RGMII_REG_0);
+ mc2 = xge_rd_csr(pdata, MAC_CONFIG_2);
+ intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL);
+ icm2 |= CFG_WAITASYNCRD_EN;
+
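+ /* program MAC interface mode, async-read wait cycles and RGMII clock
+ * selection according to the negotiated link speed
+ */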
+ switch (pdata->phy_speed) {
+ case SPEED_10:
+ SET_REG_BITS(&mc2, INTF_MODE, 1);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 0);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 0);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+ break;
+ case SPEED_100:
+ SET_REG_BITS(&mc2, INTF_MODE, 1);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 1);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 1);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+ break;
+ default:
+ SET_REG_BITS(&mc2, INTF_MODE, 2);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 2);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 2);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 1);
+ break;
+ }
+
+ mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC;
+ SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32);
+
+ xge_wr_csr(pdata, MAC_CONFIG_2, mc2);
+ xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl);
+ xge_wr_csr(pdata, RGMII_REG_0, rgmii);
+ xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0);
+ xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2);
+ xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0);
+}
+
+void xge_mac_set_station_addr(struct xge_pdata *pdata)
+{
+ u8 *dev_addr = pdata->ndev->dev_addr;
+ u32 addr0, addr1;
+
+ addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+ addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+ xge_wr_csr(pdata, STATION_ADDR0, addr0);
+ xge_wr_csr(pdata, STATION_ADDR1, addr1);
+}
+
+void xge_mac_init(struct xge_pdata *pdata)
+{
+ xge_mac_reset(pdata);
+ xge_mac_set_speed(pdata);
+ xge_mac_set_station_addr(pdata);
+}
+
+void xge_mac_enable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+ data |= TX_EN | RX_EN;
+ xge_wr_csr(pdata, MAC_CONFIG_1, data);
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+}
+
+void xge_mac_disable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+ data &= ~(TX_EN | RX_EN);
+ xge_wr_csr(pdata, MAC_CONFIG_1, data);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h
new file mode 100644
index 000000000000..0fce6ae15ce0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.h
@@ -0,0 +1,110 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAC_H__
+#define __XGENE_ENET_V2_MAC_H__
+
+/* Register offsets */
+#define MAC_CONFIG_1 0xa000
+#define MAC_CONFIG_2 0xa004
+#define MII_MGMT_CONFIG 0xa020
+#define MII_MGMT_COMMAND 0xa024
+#define MII_MGMT_ADDRESS 0xa028
+#define MII_MGMT_CONTROL 0xa02c
+#define MII_MGMT_STATUS 0xa030
+#define MII_MGMT_INDICATORS 0xa034
+#define INTERFACE_CONTROL 0xa038
+#define STATION_ADDR0 0xa040
+#define STATION_ADDR1 0xa044
+#define RBYT 0xa09c
+#define RPKT 0xa0a0
+#define RFCS 0xa0a4
+
+#define RGMII_REG_0 0x27e0
+#define ICM_CONFIG0_REG_0 0x2c00
+#define ICM_CONFIG2_REG_0 0x2c08
+#define ECM_CONFIG0_REG_0 0x2d00
+
+/* Register fields */
+#define SOFT_RESET BIT(31)
+#define TX_EN BIT(0)
+#define RX_EN BIT(2)
+#define PAD_CRC BIT(2)
+#define CRC_EN BIT(1)
+#define FULL_DUPLEX BIT(0)
+
+#define INTF_MODE_POS 8
+#define INTF_MODE_LEN 2
+#define HD_MODE_POS 25
+#define HD_MODE_LEN 2
+#define CFG_MACMODE_POS 18
+#define CFG_MACMODE_LEN 2
+#define CFG_WAITASYNCRD_POS 0
+#define CFG_WAITASYNCRD_LEN 16
+#define CFG_SPEED_125_POS 24
+#define CFG_WFIFOFULLTHR_POS 0
+#define CFG_WFIFOFULLTHR_LEN 7
+#define MGMT_CLOCK_SEL_POS 0
+#define MGMT_CLOCK_SEL_LEN 3
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define MII_MGMT_BUSY BIT(0)
+#define MII_READ_CYCLE BIT(0)
+#define CFG_WAITASYNCRD_EN BIT(16)
+
+static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val)
+{
+ u32 mask = GENMASK(pos + len - 1, pos);
+
+ *var &= ~mask;
+ *var |= ((val << pos) & mask);
+}
+
+static inline u32 xgene_get_reg_bits(u32 var, int pos, int len)
+{
+ u32 mask = GENMASK(pos + len - 1, pos);
+
+ return (var & mask) >> pos;
+}
+
+#define SET_REG_BITS(var, field, val) \
+ xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val)
+
+#define SET_REG_BIT(var, field, val) \
+ xgene_set_reg_bits(var, field ## _POS, 1, val)
+
+#define GET_REG_BITS(var, field) \
+ xgene_get_reg_bits(var, field ## _POS, field ## _LEN)
+
+#define GET_REG_BIT(var, field) ((var) & (field))
+
+struct xge_pdata;
+
+void xge_mac_reset(struct xge_pdata *pdata);
+void xge_mac_enable(struct xge_pdata *pdata);
+void xge_mac_disable(struct xge_pdata *pdata);
+void xge_mac_init(struct xge_pdata *pdata);
+int xge_port_init(struct net_device *ndev);
+void xge_mac_set_station_addr(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_MAC_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
new file mode 100644
index 000000000000..ae76977d10b4
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -0,0 +1,756 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static const struct acpi_device_id xge_acpi_match[];
+
+static int xge_get_resources(struct xge_pdata *pdata)
+{
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ int phy_mode, ret = 0;
+ struct resource *res;
+ struct device *dev;
+
+ pdev = pdata->pdev;
+ dev = &pdev->dev;
+ ndev = pdata->ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Resource enet_csr not defined\n");
+ return -ENODEV;
+ }
+
+ pdata->resources.base_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->resources.base_addr) {
+ dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+ return -ENOMEM;
+ }
+
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ eth_hw_addr_random(ndev);
+
+ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+ phy_mode = device_get_phy_mode(dev);
+ if (phy_mode < 0) {
+ dev_err(dev, "Unable to get phy-connection-type\n");
+ return phy_mode;
+ }
+ pdata->resources.phy_mode = phy_mode;
+
+ if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
+ dev_err(dev, "Incorrect phy-connection-type specified\n");
+ return -ENODEV;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(dev, "Unable to get ENET IRQ\n");
+ ret = ret ? : -ENXIO;
+ return ret;
+ }
+ pdata->resources.irq = ret;
+
+ return 0;
+}
+
+static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ const u8 slots = XGENE_ENET_NUM_DESC - 1;
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_raw_desc *raw_desc;
+ u64 addr_lo, addr_hi;
+ u8 tail = ring->tail;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 len;
+ int i;
+
+ for (i = 0; i < nbuf; i++) {
+ raw_desc = &ring->raw_desc[tail];
+
+ len = XGENE_ENET_STD_MTU;
+ skb = netdev_alloc_skb(ndev, len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ ring->pkt_info[tail].skb = skb;
+ ring->pkt_info[tail].dma_addr = dma_addr;
+
+ addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+ addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+ SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+ SET_BITS(PKT_ADDRH,
+ upper_32_bits(dma_addr)));
+
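+ /* order the m1 (address) update before m0 hands the descriptor back to hardware */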
+ dma_wmb();
+ raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+ SET_BITS(E, 1));
+ tail = (tail + 1) & slots;
+ }
+
+ ring->tail = tail;
+
+ return 0;
+}
+
+static int xge_init_hw(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = xge_port_reset(ndev);
+ if (ret)
+ return ret;
+
+ xge_port_init(ndev);
+ pdata->nbufs = NUM_BUFS;
+
+ return 0;
+}
+
+static irqreturn_t xge_irq(const int irq, void *data)
+{
+ struct xge_pdata *pdata = data;
+
+ if (napi_schedule_prep(&pdata->napi)) {
+ xge_intr_disable(pdata);
+ __napi_schedule(&pdata->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xge_request_irq(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ int ret;
+
+ snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
+
+ ret = devm_request_irq(dev, pdata->resources.irq, xge_irq,
+ 0, pdata->irq_name, pdata);
+ if (ret)
+ netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
+
+ return ret;
+}
+
+static void xge_free_irq(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+
+ devm_free_irq(dev, pdata->resources.irq, pdata);
+}
+
+static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
+{
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+ (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
+ return true;
+
+ return false;
+}
+
+static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ static dma_addr_t dma_addr;
+ u64 addr_lo, addr_hi;
+ void *pkt_buf;
+ u8 tail;
+ u16 len;
+
+ tx_ring = pdata->tx_ring;
+ tail = tx_ring->tail;
+ len = skb_headlen(skb);
+ raw_desc = &tx_ring->raw_desc[tail];
+
+ if (!is_tx_slot_available(raw_desc)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Packet buffers should be 64B aligned */
+ pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!pkt_buf)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ memcpy(pkt_buf, skb->data, len);
+
+ addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+ addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+ SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+ SET_BITS(PKT_ADDRH,
+ upper_32_bits(dma_addr)));
+
+ tx_ring->pkt_info[tail].skb = skb;
+ tx_ring->pkt_info[tail].dma_addr = dma_addr;
+ tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
+
+ dma_wmb();
+
+ raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+ SET_BITS(PKT_SIZE, len) |
+ SET_BITS(E, 0));
+ skb_tx_timestamp(skb);
+ xge_wr_csr(pdata, DMATXCTRL, 1);
+
+ tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
+
+ return NETDEV_TX_OK;
+}
+
+static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
+{
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+ !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
+ return true;
+
+ return false;
+}
+
+static void xge_txc_poll(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ void *pkt_buf;
+ u32 data;
+ u8 head;
+
+ tx_ring = pdata->tx_ring;
+ head = tx_ring->head;
+
+ data = xge_rd_csr(pdata, DMATXSTATUS);
+ if (!GET_BITS(TXPKTCOUNT, data))
+ return;
+
+ while (1) {
+ raw_desc = &tx_ring->raw_desc[head];
+
+ if (!is_tx_hw_done(raw_desc))
+ break;
+
+ dma_rmb();
+
+ skb = tx_ring->pkt_info[head].skb;
+ dma_addr = tx_ring->pkt_info[head].dma_addr;
+ pkt_buf = tx_ring->pkt_info[head].pkt_buf;
+ pdata->stats.tx_packets++;
+ pdata->stats.tx_bytes += skb->len;
+ dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+ dev_kfree_skb_any(skb);
+
+ /* clear pktstart address and pktsize */
+ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+ SET_BITS(PKT_SIZE, SLOT_EMPTY));
+ xge_wr_csr(pdata, DMATXSTATUS, 1);
+
+ head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+ }
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ tx_ring->head = head;
+}
+
+static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *rx_ring;
+ struct xge_raw_desc *raw_desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int processed = 0;
+ u8 head, rx_error;
+ int i, ret;
+ u32 data;
+ u16 len;
+
+ rx_ring = pdata->rx_ring;
+ head = rx_ring->head;
+
+ data = xge_rd_csr(pdata, DMARXSTATUS);
+ if (!GET_BITS(RXPKTCOUNT, data))
+ return 0;
+
+ for (i = 0; i < budget; i++) {
+ raw_desc = &rx_ring->raw_desc[head];
+
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+ break;
+
+ dma_rmb();
+
+ skb = rx_ring->pkt_info[head].skb;
+ rx_ring->pkt_info[head].skb = NULL;
+ dma_addr = rx_ring->pkt_info[head].dma_addr;
+ len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+ DMA_FROM_DEVICE);
+
+ rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
+ if (unlikely(rx_error)) {
+ pdata->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ pdata->stats.rx_packets++;
+ pdata->stats.rx_bytes += len;
+ napi_gro_receive(&pdata->napi, skb);
+out:
+ ret = xge_refill_buffers(ndev, 1);
+ xge_wr_csr(pdata, DMARXSTATUS, 1);
+ xge_wr_csr(pdata, DMARXCTRL, 1);
+
+ if (ret)
+ break;
+
+ head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+ processed++;
+ }
+
+ rx_ring->head = head;
+
+ return processed;
+}
+
+static void xge_delete_desc_ring(struct net_device *ndev,
+ struct xge_desc_ring *ring)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ u16 size;
+
+ if (!ring)
+ return;
+
+ size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+ if (ring->desc_addr)
+ dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
+
+ kfree(ring->pkt_info);
+ kfree(ring);
+}
+
+static void xge_free_buffers(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ struct device *dev = &pdata->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ skb = ring->pkt_info[i].skb;
+ dma_addr = ring->pkt_info[i].dma_addr;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void xge_delete_desc_rings(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_txc_poll(ndev);
+ xge_delete_desc_ring(ndev, pdata->tx_ring);
+
+ xge_rx_poll(ndev, 64);
+ xge_free_buffers(ndev);
+ xge_delete_desc_ring(ndev, pdata->rx_ring);
+}
+
+static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *ring;
+ u16 size;
+
+ ring = kzalloc(sizeof(struct xge_desc_ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ ring->ndev = ndev;
+
+ size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+ ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
+ GFP_KERNEL);
+ if (!ring->desc_addr)
+ goto err;
+
+ ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(struct pkt_info),
+ GFP_KERNEL);
+ if (!ring->pkt_info)
+ goto err;
+
+ xge_setup_desc(ring);
+
+ return ring;
+
+err:
+ xge_delete_desc_ring(ndev, ring);
+
+ return NULL;
+}
+
+static int xge_create_desc_rings(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring;
+ int ret;
+
+ /* create tx ring */
+ ring = xge_create_desc_ring(ndev);
+ if (!ring)
+ goto err;
+
+ pdata->tx_ring = ring;
+ xge_update_tx_desc_addr(pdata);
+
+ /* create rx ring */
+ ring = xge_create_desc_ring(ndev);
+ if (!ring)
+ goto err;
+
+ pdata->rx_ring = ring;
+ xge_update_rx_desc_addr(pdata);
+
+ ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ xge_delete_desc_rings(ndev);
+
+ return -ENOMEM;
+}
+
+static int xge_open(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = xge_create_desc_rings(ndev);
+ if (ret)
+ return ret;
+
+ napi_enable(&pdata->napi);
+ ret = xge_request_irq(ndev);
+ if (ret)
+ return ret;
+
+ xge_intr_enable(pdata);
+ xge_wr_csr(pdata, DMARXCTRL, 1);
+ xge_mac_enable(pdata);
+ netif_start_queue(ndev);
+ netif_carrier_on(ndev);
+
+ return 0;
+}
+
+static int xge_close(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+ xge_mac_disable(pdata);
+
+ xge_intr_disable(pdata);
+ xge_free_irq(ndev);
+ napi_disable(&pdata->napi);
+ xge_delete_desc_rings(ndev);
+
+ return 0;
+}
+
+static int xge_napi(struct napi_struct *napi, const int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct xge_pdata *pdata;
+ int processed;
+
+ pdata = netdev_priv(ndev);
+
+ xge_txc_poll(ndev);
+ processed = xge_rx_poll(ndev, budget);
+
+ if (processed < budget) {
+ napi_complete_done(napi, processed);
+ xge_intr_enable(pdata);
+ }
+
+ return processed;
+}
+
+static int xge_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ return ret;
+
+ xge_mac_set_station_addr(pdata);
+
+ return 0;
+}
+
+static bool is_tx_pending(struct xge_raw_desc *raw_desc)
+{
+ if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+ return true;
+
+ return false;
+}
+
+static void xge_free_pending_skb(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ void *pkt_buf;
+ int i;
+
+ tx_ring = pdata->tx_ring;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ raw_desc = &tx_ring->raw_desc[i];
+
+ if (!is_tx_pending(raw_desc))
+ continue;
+
+ skb = tx_ring->pkt_info[i].skb;
+ dma_addr = tx_ring->pkt_info[i].dma_addr;
+ pkt_buf = tx_ring->pkt_info[i].pkt_buf;
+ dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void xge_timeout(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ rtnl_lock();
+
+ if (netif_running(ndev)) {
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+ xge_intr_disable(pdata);
+ napi_disable(&pdata->napi);
+
+ xge_wr_csr(pdata, DMATXCTRL, 0);
+ xge_txc_poll(ndev);
+ xge_free_pending_skb(ndev);
+ xge_wr_csr(pdata, DMATXSTATUS, ~0U);
+
+ xge_setup_desc(pdata->tx_ring);
+ xge_update_tx_desc_addr(pdata);
+ xge_mac_init(pdata);
+
+ napi_enable(&pdata->napi);
+ xge_intr_enable(pdata);
+ xge_mac_enable(pdata);
+ netif_start_queue(ndev);
+ netif_carrier_on(ndev);
+ }
+
+ rtnl_unlock();
+}
+
+static void xge_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_stats *stats = &pdata->stats;
+
+ storage->tx_packets += stats->tx_packets;
+ storage->tx_bytes += stats->tx_bytes;
+
+ storage->rx_packets += stats->rx_packets;
+ storage->rx_bytes += stats->rx_bytes;
+ storage->rx_errors += stats->rx_errors;
+}
+
+static const struct net_device_ops xgene_ndev_ops = {
+ .ndo_open = xge_open,
+ .ndo_stop = xge_close,
+ .ndo_start_xmit = xge_start_xmit,
+ .ndo_set_mac_address = xge_set_mac_addr,
+ .ndo_tx_timeout = xge_timeout,
+ .ndo_get_stats64 = xge_get_stats64,
+};
+
+static int xge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct xge_pdata *pdata;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct xge_pdata));
+ if (!ndev)
+ return -ENOMEM;
+
+ pdata = netdev_priv(ndev);
+
+ pdata->pdev = pdev;
+ pdata->ndev = ndev;
+ SET_NETDEV_DEV(ndev, dev);
+ platform_set_drvdata(pdev, pdata);
+ ndev->netdev_ops = &xgene_ndev_ops;
+
+ ndev->features |= NETIF_F_GSO |
+ NETIF_F_GRO;
+
+ ret = xge_get_resources(pdata);
+ if (ret)
+ goto err;
+
+ ndev->hw_features = ndev->features;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ netdev_err(ndev, "No usable DMA configuration\n");
+ goto err;
+ }
+
+ ret = xge_init_hw(ndev);
+ if (ret)
+ goto err;
+
+ netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+
+ netif_carrier_off(ndev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to register netdev\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int xge_remove(struct platform_device *pdev)
+{
+ struct xge_pdata *pdata;
+ struct net_device *ndev;
+
+ pdata = platform_get_drvdata(pdev);
+ ndev = pdata->ndev;
+
+ rtnl_lock();
+ if (netif_running(ndev))
+ dev_close(ndev);
+ rtnl_unlock();
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static void xge_shutdown(struct platform_device *pdev)
+{
+ struct xge_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata)
+ return;
+
+ if (!pdata->ndev)
+ return;
+
+ xge_remove(pdev);
+}
+
+static const struct acpi_device_id xge_acpi_match[] = {
+ { "APMC0D80" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
+
+static struct platform_driver xge_driver = {
+ .driver = {
+ .name = "xgene-enet-v2",
+ .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ },
+ .probe = xge_probe,
+ .remove = xge_remove,
+ .shutdown = xge_shutdown,
+};
+module_platform_driver(xge_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_VERSION(XGENE_ENET_V2_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
new file mode 100644
index 000000000000..ada7b0e82586
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -0,0 +1,75 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAIN_H__
+#define __XGENE_ENET_V2_MAIN_H__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/prefetch.h>
+#include <linux/phy.h>
+#include <net/ip.h>
+#include "mac.h"
+#include "enet.h"
+#include "ring.h"
+
+#define XGENE_ENET_V2_VERSION "v1.0"
+#define XGENE_ENET_STD_MTU 1536
+#define XGENE_ENET_MIN_FRAME 60
+#define IRQ_ID_SIZE 16
+
+struct xge_resource {
+ void __iomem *base_addr;
+ int phy_mode;
+ u32 irq;
+};
+
+struct xge_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+};
+
+/* ethernet private data */
+struct xge_pdata {
+ struct xge_resource resources;
+ struct xge_desc_ring *tx_ring;
+ struct xge_desc_ring *rx_ring;
+ struct platform_device *pdev;
+ char irq_name[IRQ_ID_SIZE];
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct xge_stats stats;
+ int phy_speed;
+ u8 nbufs;
+};
+
+#endif /* __XGENE_ENET_V2_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c
new file mode 100644
index 000000000000..38810828f8f0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.c
@@ -0,0 +1,81 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+/* create circular linked list of descriptors */
+void xge_setup_desc(struct xge_desc_ring *ring)
+{
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_h, next_dma;
+ u16 offset;
+ int i;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ raw_desc = &ring->raw_desc[i];
+
+ offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
+ next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
+
+ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+ SET_BITS(PKT_SIZE, SLOT_EMPTY));
+ dma_h = upper_32_bits(next_dma);
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
+ SET_BITS(NEXT_DESC_ADDRH, dma_h));
+ }
+}
+
+void xge_update_tx_desc_addr(struct xge_pdata *pdata)
+{
+ struct xge_desc_ring *ring = pdata->tx_ring;
+ dma_addr_t dma_addr = ring->dma_addr;
+
+ xge_wr_csr(pdata, DMATXDESCL, dma_addr);
+ xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+void xge_update_rx_desc_addr(struct xge_pdata *pdata)
+{
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ dma_addr_t dma_addr = ring->dma_addr;
+
+ xge_wr_csr(pdata, DMARXDESCL, dma_addr);
+ xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+void xge_intr_enable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = RX_PKT_RCVD | TX_PKT_SENT;
+ xge_wr_csr(pdata, DMAINTRMASK, data);
+}
+
+void xge_intr_disable(struct xge_pdata *pdata)
+{
+ xge_wr_csr(pdata, DMAINTRMASK, 0);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h
new file mode 100644
index 000000000000..abc8c9a84954
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.h
@@ -0,0 +1,119 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_RING_H__
+#define __XGENE_ENET_V2_RING_H__
+
+#define XGENE_ENET_DESC_SIZE 64
+#define XGENE_ENET_NUM_DESC 256
+#define NUM_BUFS 8
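+/* PKT_SIZE sentinel written to a descriptor to mark the slot as free */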
+#define SLOT_EMPTY 0xfff
+
+#define DMATXCTRL 0xa180
+#define DMATXDESCL 0xa184
+#define DMATXDESCH 0xa1a0
+#define DMATXSTATUS 0xa188
+#define DMARXCTRL 0xa18c
+#define DMARXDESCL 0xa190
+#define DMARXDESCH 0xa1a4
+#define DMARXSTATUS 0xa194
+#define DMAINTRMASK 0xa198
+#define DMAINTERRUPT 0xa19c
+
+#define D_POS 62
+#define D_LEN 2
+#define E_POS 63
+#define E_LEN 1
+#define PKT_ADDRL_POS 0
+#define PKT_ADDRL_LEN 32
+#define PKT_ADDRH_POS 32
+#define PKT_ADDRH_LEN 10
+#define PKT_SIZE_POS 32
+#define PKT_SIZE_LEN 12
+#define NEXT_DESC_ADDRL_POS 0
+#define NEXT_DESC_ADDRL_LEN 32
+#define NEXT_DESC_ADDRH_POS 48
+#define NEXT_DESC_ADDRH_LEN 10
+
+#define TXPKTCOUNT_POS 16
+#define TXPKTCOUNT_LEN 8
+#define RXPKTCOUNT_POS 16
+#define RXPKTCOUNT_LEN 8
+
+#define TX_PKT_SENT BIT(0)
+#define TX_BUS_ERROR BIT(3)
+#define RX_PKT_RCVD BIT(4)
+#define RX_BUS_ERROR BIT(7)
+#define RXSTATUS_RXPKTRCVD BIT(0)
+
+struct xge_raw_desc {
+ __le64 m0;
+ __le64 m1;
+ __le64 m2;
+ __le64 m3;
+ __le64 m4;
+ __le64 m5;
+ __le64 m6;
+ __le64 m7;
+};
+
+struct pkt_info {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *pkt_buf;
+};
+
+/* software context of a descriptor ring */
+struct xge_desc_ring {
+ struct net_device *ndev;
+ dma_addr_t dma_addr;
+ u8 head;
+ u8 tail;
+ union {
+ void *desc_addr;
+ struct xge_raw_desc *raw_desc;
+ };
+ struct pkt_info *pkt_info;
+};
+
+static inline u64 xge_set_desc_bits(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+static inline u64 xge_get_desc_bits(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define SET_BITS(field, val) \
+ xge_set_desc_bits(field ## _POS, field ## _LEN, val)
+
+#define GET_BITS(field, src) \
+ xge_get_desc_bits(field ## _POS, field ## _LEN, src)
+
+void xge_setup_desc(struct xge_desc_ring *ring);
+void xge_update_tx_desc_addr(struct xge_pdata *pdata);
+void xge_update_rx_desc_addr(struct xge_pdata *pdata);
+void xge_intr_enable(struct xge_pdata *pdata);
+void xge_intr_disable(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_RING_H__ */
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 940fb24bba21..96413808c726 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -109,7 +109,6 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
- select HWMON
imply PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -117,6 +116,13 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
+config TIGON3_HWMON
+ bool "Broadcom Tigon3 HWMON support"
+ default y
+ depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor on Tigon3 devices.
+
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index d59cfcc4c4d5..cf15b7e2929c 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -192,36 +192,50 @@ static int bgmac_probe(struct bcma_device *core)
goto err1;
}
- bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
- BGMAC_BFL_ENETROBO);
+ bgmac->has_robosw = !!(sprom->boardflags_lo & BGMAC_BFL_ENETROBO);
if (bgmac->has_robosw)
dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
- if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ if (sprom->boardflags_lo & BGMAC_BFL_ENETADM)
dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
/* Feature Flags */
- switch (core->bus->chipinfo.id) {
+ switch (ci->id) {
+ /* BCM 471X/535X family */
+ case BCMA_CHIP_ID_BCM4716:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ /* fallthrough */
+ case BCMA_CHIP_ID_BCM47162:
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ break;
case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM53572:
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) {
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+ ci->pkg == BCMA_PKG_ID_BCM47186) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
- case BCMA_CHIP_ID_BCM53572:
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ case BCMA_CHIP_ID_BCM53573:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (core->core_unit == 0) {
+ bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |=
+ BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
+ } else if (core->core_unit == 1) {
+ bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
+ bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
}
break;
case BCMA_CHIP_ID_BCM4749:
@@ -229,18 +243,11 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == 10) {
+ if (ci->pkg == 10) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
break;
- case BCMA_CHIP_ID_BCM4716:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- /* fallthrough */
- case BCMA_CHIP_ID_BCM47162:
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- break;
/* bcm4707_family */
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM47094:
@@ -249,21 +256,6 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
break;
- case BCMA_CHIP_ID_BCM53573:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
- if (core->core_unit == 0) {
- bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |=
- BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
- } else if (core->core_unit == 1) {
- bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
- bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
- }
- break;
default:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index da1b8b225eb9..73aca97a96bc 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -21,8 +21,12 @@
#include <linux/of_net.h>
#include "bgmac.h"
+#define NICPM_PADRING_CFG 0x00000004
#define NICPM_IOMUX_CTRL 0x00000008
+#define NICPM_PADRING_CFG_INIT_VAL 0x74000000
+#define NICPM_IOMUX_CTRL_INIT_VAL_AX 0x21880000
+
#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000
#define NICPM_IOMUX_CTRL_SPD_SHIFT 10
#define NICPM_IOMUX_CTRL_SPD_10M 0
@@ -113,6 +117,10 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
if (!bgmac->plat.nicpm_base)
return;
+ /* SET RGMII IO CONFIG */
+ writel(NICPM_PADRING_CFG_INIT_VAL,
+ bgmac->plat.nicpm_base + NICPM_PADRING_CFG);
+
val = NICPM_IOMUX_CTRL_INIT_VAL;
switch (bgmac->net_dev->phydev->speed) {
default:
@@ -244,6 +252,31 @@ static int bgmac_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int bgmac_suspend(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_suspend(bgmac);
+}
+
+static int bgmac_resume(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_resume(bgmac);
+}
+
+static const struct dev_pm_ops bgmac_pm_ops = {
+ .suspend = bgmac_suspend,
+ .resume = bgmac_resume
+};
+
+#define BGMAC_PM_OPS (&bgmac_pm_ops)
+#else
+#define BGMAC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct of_device_id bgmac_of_enet_match[] = {
{.compatible = "brcm,amac",},
{.compatible = "brcm,nsp-amac",},
@@ -257,6 +290,7 @@ static struct platform_driver bgmac_enet_driver = {
.driver = {
.name = "bgmac-enet",
.of_match_table = bgmac_of_enet_match,
+ .pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
.remove = bgmac_remove,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fd66fca00e01..e1a24ee6ab8b 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1480,6 +1480,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
+ dev_set_drvdata(bgmac->dev, bgmac);
if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
@@ -1552,5 +1553,55 @@ void bgmac_enet_remove(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
+int bgmac_enet_suspend(struct bgmac *bgmac)
+{
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ phy_stop(bgmac->net_dev->phydev);
+
+ netif_stop_queue(bgmac->net_dev);
+
+ napi_disable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_detach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ bgmac_chip_intrs_off(bgmac);
+ bgmac_chip_reset(bgmac);
+ bgmac_dma_cleanup(bgmac);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
+
+int bgmac_enet_resume(struct bgmac *bgmac)
+{
+ int rc;
+
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ rc = bgmac_dma_init(bgmac);
+ if (rc)
+ return rc;
+
+ bgmac_chip_init(bgmac);
+
+ napi_enable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_attach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ netif_start_queue(bgmac->net_dev);
+
+ phy_start(bgmac->net_dev->phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_resume);
+
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 6d1c6ff1ed96..c1818766c501 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -402,7 +402,7 @@
#define BGMAC_WEIGHT 64
-#define ETHER_MAX_LEN 1518
+#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
@@ -537,6 +537,8 @@ int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
+int bgmac_enet_suspend(struct bgmac *bgmac);
+int bgmac_enet_resume(struct bgmac *bgmac);
struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 69015fa50f20..44f9c0a1f85d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -621,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* GENET TDMA hardware does not support a configurable timeout, but will
* always generate an interrupt either after MBDONE packets have been
- * transmitted, or when the ring is emtpy.
+ * transmitted, or when the ring is empty.
*/
if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -1078,8 +1078,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= (EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv))
+ reg |= EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR;
+ else
+ reg |= EXT_PWR_DOWN_PHY;
+
+ reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
bcmgenet_phy_power_set(priv->dev, false);
@@ -1104,12 +1113,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_BIAS);
- /* fallthrough */
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv)) {
+ reg &= ~(EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR);
+ reg |= EXT_PHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ mdelay(1);
+
+ reg &= ~EXT_PHY_RESET;
+ } else {
+ reg &= ~EXT_PWR_DOWN_PHY;
+ reg |= EXT_PWR_DN_EN_LD;
+ }
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_phy_power_set(priv->dev, true);
+ bcmgenet_mii_reset(priv->dev);
+ break;
+
case GENET_POWER_CABLE_SENSE:
/* enable APD */
- reg |= EXT_PWR_DN_EN_LD;
+ if (!GENET_IS_V5(priv)) {
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
break;
case GENET_POWER_WOL_MAGIC:
bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1117,39 +1148,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
default:
break;
}
-
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- if (mode == GENET_POWER_PASSIVE) {
- bcmgenet_phy_power_set(priv->dev, true);
- bcmgenet_mii_reset(priv->dev);
- }
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int val = 0;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (!priv->phydev)
- val = -ENODEV;
- else
- val = phy_mii_ioctl(priv->phydev, rq, cmd);
- break;
-
- default:
- val = -EINVAL;
- break;
- }
+ if (!priv->phydev)
+ return -ENODEV;
- return val;
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1240,14 +1252,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
- /* Compute how many buffers are transmitted since last xmit call */
- c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
- c_index &= DMA_C_INDEX_MASK;
-
- if (likely(c_index >= ring->c_index))
- txbds_ready = c_index - ring->c_index;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX)
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_CLEAR);
else
- txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_CLEAR);
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+ & DMA_C_INDEX_MASK;
+ txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, tx_done, dev,
"%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1280,7 +1296,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
}
ring->free_bds += txbds_processed;
- ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ ring->c_index = c_index;
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
@@ -1288,7 +1304,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
pkts_compl, bytes_compl);
- return pkts_compl;
+ return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1657,10 +1673,21 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned long dma_flag;
int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
- unsigned int p_index;
+ unsigned int p_index, mask;
unsigned int discards;
unsigned int chksum_ok = 0;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX) {
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+ INTRL2_CPU_CLEAR);
+ } else {
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv,
+ mask,
+ INTRL2_CPU_CLEAR);
+ }
+
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
@@ -1680,12 +1707,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
}
p_index &= DMA_P_INDEX_MASK;
-
- if (likely(p_index >= ring->c_index))
- rxpkttoprocess = p_index - ring->c_index;
- else
- rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
- p_index;
+ rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, rx_status, dev,
"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1912,10 +1934,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
/* Mask all interrupts.*/
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1942,8 +1962,6 @@ static int init_umac(struct bcmgenet_priv *priv)
int ret;
u32 reg;
u32 int0_enable = 0;
- u32 int1_enable = 0;
- int i;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1970,12 +1988,6 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- /* Enable Rx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
- /* Enable Tx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
/* Configure backpressure vectors for MoCA */
if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
@@ -1993,18 +2005,8 @@ static int init_umac(struct bcmgenet_priv *priv)
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
- /* Enable Rx priority queue interrupts */
- for (i = 0; i < priv->hw_params->rx_queues; ++i)
- int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
- /* Enable Tx priority queue interrupts */
- for (i = 0; i < priv->hw_params->tx_queues; ++i)
- int1_enable |= (1 << i);
-
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- /* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
return 0;
@@ -2136,22 +2138,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_tx_ring *ring;
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << i);
}
ring = &priv->tx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_disable = 0xffff;
struct bcmgenet_tx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
@@ -2264,22 +2277,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_rx_ring *ring;
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
}
ring = &priv->rx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
struct bcmgenet_rx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
@@ -2634,6 +2658,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM)) {
+ /* all other interrupts of interest are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
@@ -2921,7 +2954,7 @@ static int bcmgenet_close(struct net_device *dev)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
@@ -3186,6 +3219,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
},
+ [GENET_V5] = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+ },
};
/* Infer hardware parameters from the detected GENET version */
@@ -3196,26 +3248,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
u8 major;
u16 gphy_rev;
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V4;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V3;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V2;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V1;
}
/* enum genet_version starts at 1 */
@@ -3225,7 +3273,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 5)
+ if (major == 6)
+ major = 5;
+ else if (major == 5)
major = 4;
else if (major == 0)
major = 1;
@@ -3253,19 +3303,25 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ if (GENET_IS_V5(priv)) {
+ /* The EPHY revision should come from the MDIO registers of
+ * the PHY not from GENET.
+ */
+ if (gphy_rev != 0) {
+ pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+ gphy_rev);
+ }
/* This is reserved so should require special treatment */
- if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
return;
- }
-
/* This is the good old scheme, just GPHY major, no minor nor patch */
- if ((gphy_rev & 0xf0) != 0)
+ } else if ((gphy_rev & 0xf0) != 0) {
priv->gphy_rev = gphy_rev << 8;
-
/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
- else if ((gphy_rev & 0xff00) != 0)
+ } else if ((gphy_rev & 0xff00) != 0) {
priv->gphy_rev = gphy_rev;
+ }
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
@@ -3295,6 +3351,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3492,7 +3549,7 @@ static int bcmgenet_suspend(struct device *d)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289d65ae..5692c0582434 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -355,8 +355,14 @@ struct bcmgenet_mib_counters {
#define EXT_PWR_DN_EN_LD (1 << 3)
#define EXT_ENERGY_DET (1 << 4)
#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_IDDQ_GLBL_PWR (1 << 7)
#define EXT_PHY_RESET (1 << 8)
#define EXT_ENERGY_DET_MASK (1 << 12)
+#define EXT_PWR_DOWN_PHY_TX (1 << 16)
+#define EXT_PWR_DOWN_PHY_RX (1 << 17)
+#define EXT_PWR_DOWN_PHY_SD (1 << 18)
+#define EXT_PWR_DOWN_PHY_RD (1 << 19)
+#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_LINK (1 << 4)
@@ -499,13 +505,15 @@ enum bcmgenet_version {
GENET_V1 = 1,
GENET_V2,
GENET_V3,
- GENET_V4
+ GENET_V4,
+ GENET_V5
};
#define GENET_IS_V1(p) ((p)->version == GENET_V1)
#define GENET_IS_V2(p) ((p)->version == GENET_V2)
#define GENET_IS_V3(p) ((p)->version == GENET_V3)
#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+#define GENET_IS_V5(p) ((p)->version == GENET_V5)
/* Hardware flags */
#define GENET_HAS_40BITS (1 << 0)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b97122926d3a..2fbd027f0148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- u32 cpu_mask_clear;
int retries = 0;
u32 reg;
@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- /* Enable the MPD interrupt */
- cpu_mask_clear = UMAC_IRQ_MPD_R;
-
- bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
-
return 0;
}
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
- u32 cpu_mask_set;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
priv->crc_fwd_en = 0;
-
- /* Stop monitoring magic packet IRQ */
- cpu_mask_set = UMAC_IRQ_MPD_R;
-
- /* Stop monitoring magic packet IRQ */
- bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
}
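
The removed lines dropped the explicit MPD interrupt unmask/mask around Wake-on-LAN entry and exit. For background, INTRL2-style blocks use a mask-set/mask-clear register pair; the sketch below shows that general idiom with placeholder offsets and a placeholder write32() helper, not the driver's actual register map.

#include <stdint.h>

/* Sketch of the mask-set/mask-clear idiom: writing a bit to MASK_CLEAR
 * unmasks (enables) that interrupt source, writing it to MASK_SET masks
 * (disables) it.  Offsets here are placeholders.
 */
#define INTRL2_CPU_MASK_SET	0x08
#define INTRL2_CPU_MASK_CLEAR	0x0c

static void write32(volatile uint32_t *base, uint32_t off, uint32_t val)
{
	base[off / 4] = val;
}

static void irq_enable(volatile uint32_t *base, uint32_t bit)
{
	write32(base, INTRL2_CPU_MASK_CLEAR, bit);	/* unmask */
}

static void irq_disable(volatile uint32_t *base, uint32_t bit)
{
	write32(base, INTRL2_CPU_MASK_SET, bit);	/* mask */
}

int main(void)
{
	uint32_t regs[8] = { 0 };

	irq_enable(regs, 1 << 0);
	irq_disable(regs, 1 << 0);
	return 0;
}
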
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e87607621e62..8df47c90cfc5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -195,29 +195,31 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (!GENET_IS_V4(priv))
- return;
-
- reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
- if (enable) {
- reg &= ~EXT_CK25_DIS;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
- reg |= EXT_GPHY_RESET;
+ if (GENET_IS_V4(priv)) {
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ if (enable) {
+ reg &= ~EXT_CK25_DIS;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~EXT_GPHY_RESET;
+ } else {
+ reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+ EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+ reg |= EXT_CK25_DIS;
+ }
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~EXT_GPHY_RESET;
+ udelay(60);
} else {
- reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
- reg |= EXT_CK25_DIS;
}
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- udelay(60);
}
static void bcmgenet_internal_phy_setup(struct net_device *dev)
@@ -227,10 +229,12 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
/* Power up PHY */
bcmgenet_phy_power_set(dev, true);
- /* enable APD */
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_PWR_DN_EN_LD;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ if (!GENET_IS_V5(priv)) {
+ /* enable APD */
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
bcmgenet_mii_reset(dev);
}
@@ -238,10 +242,12 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ if (!GENET_IS_V5(priv)) {
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ }
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->phydev,
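
The reworked bcmgenet_phy_power_set() above keeps the same GENET v4 power-up ordering: ungate the 25 MHz clock, clear IDDQ/power-down while asserting reset, then release reset; power-down does the reverse and gates the clock last. A compact user-space sketch of that ordering is below; the bit positions and the msleep() stub are placeholders, not the real EXT_GPHY_CTRL layout.

#include <stdint.h>
#include <stdio.h>

#define EXT_CFG_IDDQ_BIAS	(1 << 0)	/* placeholder bit positions */
#define EXT_CFG_PWR_DOWN	(1 << 1)
#define EXT_CK25_DIS		(1 << 4)
#define EXT_GPHY_RESET		(1 << 5)

static void msleep(int ms) { (void)ms; /* placeholder delay */ }

static uint32_t gphy_power_up(uint32_t ctrl)
{
	ctrl &= ~EXT_CK25_DIS;			/* ungate the 25 MHz clock */
	msleep(1);
	ctrl &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
	ctrl |= EXT_GPHY_RESET;			/* power up while held in reset */
	msleep(1);
	ctrl &= ~EXT_GPHY_RESET;		/* finally release reset */
	return ctrl;
}

static uint32_t gphy_power_down(uint32_t ctrl)
{
	ctrl |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
	msleep(1);
	ctrl |= EXT_CK25_DIS;			/* gate the clock last */
	return ctrl;
}

int main(void)
{
	printf("up: 0x%02x down: 0x%02x\n",
	       gphy_power_up(0x3f), gphy_power_down(0));
	return 0;
}
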
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 30d1eb9ebec9..f395b951f5e7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
return timeout_us ? 0 : -EBUSY;
}
+#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
u32 i, apedata;
@@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
return 0;
}
+#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
@@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
int i;
@@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp)
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
}
}
+#else
+static inline void tg3_hwmon_close(struct tg3 *tp) { }
+static inline void tg3_hwmon_open(struct tg3 *tp) { }
+#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
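
The tg3 changes follow the usual pattern of compiling hwmon support out behind a Kconfig symbol while keeping the call sites unconditional: real functions under #ifdef, empty inline stubs otherwise. A generic sketch of that pattern, with hypothetical names standing in for CONFIG_TIGON3_HWMON and the tg3 helpers, is shown below.

#include <stdio.h>

/* FEATURE_FOO stands in for CONFIG_TIGON3_HWMON; callers never need an
 * #ifdef of their own because the stubs compile to nothing.
 */
#ifdef FEATURE_FOO
static void foo_open(void) { printf("foo enabled\n"); }
static void foo_close(void) { printf("foo disabled\n"); }
#else
static inline void foo_open(void) { }
static inline void foo_close(void) { }
#endif

int main(void)
{
	foo_open();		/* unconditional call site */
	foo_close();
	return 0;
}
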
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 92f46b1375c3..ca529a78bca7 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -805,7 +805,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
}
for (i = 0; i < num_iqs; i++) {
- int numa_node = cpu_to_node(i % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
spin_lock_init(&lio->glist_lock[i]);
@@ -2555,6 +2555,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
__func__);
return 1;
}
+
+ if (octeon_dev->ioq_vector) {
+ struct octeon_ioq_vector *ioq_vector;
+
+ ioq_vector = &octeon_dev->ioq_vector[q];
+ netif_set_xps_queue(netdev,
+ &ioq_vector->affinity_mask,
+ ioq_vector->iq_index);
+ }
}
return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 9675ffbf25e6..e21b477d0159 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -793,7 +793,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
u32 num_descs = 0;
u32 iq_no = 0;
union oct_txpciq txpciq;
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct))
num_descs =
@@ -837,7 +837,7 @@ int octeon_setup_output_queues(struct octeon_device *oct)
u32 num_descs = 0;
u32 desc_size = 0;
u32 oq_no = 0;
- int numa_node = cpu_to_node(oq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct)) {
num_descs =
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 79f809479af6..00970597ada8 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -226,8 +226,7 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_droq *droq;
u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
- int orig_node = dev_to_node(&oct->pci_dev->dev);
- int numa_node = cpu_to_node(q_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -267,13 +266,8 @@ int octeon_init_droq(struct octeon_device *oct,
droq->buffer_size = c_buf_size;
desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
- set_dev_node(&oct->pci_dev->dev, numa_node);
droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
(dma_addr_t *)&droq->desc_ring_dma);
- set_dev_node(&oct->pci_dev->dev, orig_node);
- if (!droq->desc_ring)
- droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
- (dma_addr_t *)&droq->desc_ring_dma);
if (!droq->desc_ring) {
dev_err(&oct->pci_dev->dev,
@@ -970,7 +964,7 @@ int octeon_create_droq(struct octeon_device *oct,
u32 desc_size, void *app_ctx)
{
struct octeon_droq *droq;
- int numa_node = cpu_to_node(q_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (oct->droq[q_no]) {
dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 4608a5af35a3..5063a12613e5 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -152,7 +152,7 @@ struct octeon_instr_queue {
struct oct_iq_stats stats;
/** DMA mapped base address of the input descriptor ring. */
- u64 base_addr_dma;
+ dma_addr_t base_addr_dma;
/** Application context */
void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 707bc15adec6..261f448f9de2 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -62,8 +62,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
u32 iq_no = (u32)txpciq.s.q_no;
u32 q_size;
struct cavium_wq *db_wq;
- int orig_node = dev_to_node(&oct->pci_dev->dev);
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
@@ -91,13 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
iq->oct_dev = oct;
- set_dev_node(&oct->pci_dev->dev, numa_node);
- iq->base_addr = lio_dma_alloc(oct, q_size,
- (dma_addr_t *)&iq->base_addr_dma);
- set_dev_node(&oct->pci_dev->dev, orig_node);
- if (!iq->base_addr)
- iq->base_addr = lio_dma_alloc(oct, q_size,
- (dma_addr_t *)&iq->base_addr_dma);
+ iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
if (!iq->base_addr) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
iq_no);
@@ -211,7 +204,7 @@ int octeon_setup_iq(struct octeon_device *oct,
void *app_ctx)
{
u32 iq_no = (u32)txpciq.s.q_no;
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (oct->instr_queue[iq_no]) {
dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
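
Across the liquidio changes above, queue memory is now placed on the NUMA node of the PCI device (dev_to_node()) and the set_dev_node()/restore/retry dance is dropped, leaving a single allocation attempt. The tiny sketch below models only that simplified flow; alloc_ring() is a stand-in for lio_dma_alloc(), and the node argument is informational in this user-space version.

#include <stdio.h>
#include <stdlib.h>

/* alloc_ring() models lio_dma_alloc(): one attempt against the device's
 * own node, no fallback retry on another node.
 */
static void *alloc_ring(int node, size_t size)
{
	(void)node;
	return calloc(1, size);
}

static void *setup_ring(int dev_node, size_t size)
{
	void *ring = alloc_ring(dev_node, size);

	if (!ring)
		fprintf(stderr, "Cannot allocate descriptor ring\n");
	return ring;
}

int main(void)
{
	void *ring = setup_ring(0, 4096);

	free(ring);
	return 0;
}
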
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e2ca107f9d94..aa769cbc7425 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
* For conformity, we'll still declare GSO explicitly.
*/
net_dev->features |= NETIF_F_GSO;
+ net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* we do not want shared skbs on TX */
@@ -334,6 +342,41 @@ static void dpaa_get_stats64(struct net_device *net_dev,
}
}
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ int i;
+
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ if (tc->tc == priv->num_tc)
+ return 0;
+
+ if (!tc->tc) {
+ netdev_reset_tc(net_dev);
+ goto out;
+ }
+
+ if (tc->tc > DPAA_TC_NUM) {
+ netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+ DPAA_TC_NUM);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(net_dev, tc->tc);
+
+ for (i = 0; i < tc->tc; i++)
+ netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+ i * DPAA_TC_TXQ_NUM);
+
+out:
+ priv->num_tc = tc->tc ? tc->tc : 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ return 0;
+}
+
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
struct platform_device *of_dev;
@@ -557,16 +600,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
/* Use multiple WQs for FQ assignment:
* - Tx Confirmation queues go to WQ1.
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- * to be scheduled, in case there are many more FQs in WQ3).
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- * Rx and Tx traffic).
+ * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ6).
+ * - Rx Default goes to WQ6.
+ * - Tx queues go to different WQs depending on their priority. Equal
+ * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ * WQ0 (highest priority).
* This ensures that Tx-confirmed buffers are timely released. In particular,
* it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
* are greatly outnumbered by other FQs in the system, while
* dequeue scheduling is round-robin.
*/
-static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
switch (fq->fq_type) {
case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +620,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
break;
case FQ_TYPE_RX_ERROR:
case FQ_TYPE_TX_ERROR:
- fq->wq = 2;
+ fq->wq = 5;
break;
case FQ_TYPE_RX_DEFAULT:
+ fq->wq = 6;
+ break;
case FQ_TYPE_TX:
- fq->wq = 3;
+ switch (idx / DPAA_TC_TXQ_NUM) {
+ case 0:
+ /* Low priority (best effort) */
+ fq->wq = 6;
+ break;
+ case 1:
+ /* Medium priority */
+ fq->wq = 2;
+ break;
+ case 2:
+ /* High priority */
+ fq->wq = 1;
+ break;
+ case 3:
+ /* Very high priority */
+ fq->wq = 0;
+ break;
+ default:
+ WARN(1, "Too many TX FQs: more than %d!\n",
+ DPAA_ETH_TXQ_NUM);
+ }
break;
default:
WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +674,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
}
for (i = 0; i < count; i++)
- dpaa_assign_wq(dpaa_fq + i);
+ dpaa_assign_wq(dpaa_fq + i, i);
return dpaa_fq;
}
@@ -985,7 +1052,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
- initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+ QM_FQCTRL_CTXASTASHING);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1055,9 +1123,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
return err;
}
-static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
- struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_params params;
@@ -1076,23 +1144,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
err = fman_port_config(port, &params);
- if (err)
+ if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
+ if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
+ return err;
+ }
err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
}
-static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
- struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+ size_t count, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
@@ -1120,32 +1194,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}
err = fman_port_config(port, &params);
- if (err)
+ if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
+ if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
+ return err;
+ }
err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
}
-static void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpaa_buffer_layout *buf_layout,
- struct device *dev)
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp **bps, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
{
struct fman_port *rxport = mac_dev->port[RX];
struct fman_port *txport = mac_dev->port[TX];
+ int err;
+
+ err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ if (err)
+ return err;
+
+ err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
- dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
- port_fqs->tx_defq, &buf_layout[TX]);
- dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
- port_fqs->rx_defq, &buf_layout[RX]);
+ return err;
}
static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1612,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
return skb;
}
+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+ /* The parser has run and performed L4 checksum validation.
+ * We know there were no parser errors (and implicitly no
+ * L4 csum error), otherwise we wouldn't be here.
+ */
+ if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+ (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+ return CHECKSUM_UNNECESSARY;
+
+ /* We're here because either the parser didn't run or the L4 checksum
+ * was not verified. This may include the case of a UDP frame with
+ * checksum zero or an L4 proto other than TCP/UDP
+ */
+ return CHECKSUM_NONE;
+}
+
/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
* accommodate the shared info area of the skb.
@@ -1556,7 +1659,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = rx_csum_offload(priv, fd);
return skb;
@@ -1616,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
if (WARN_ON(unlikely(!skb)))
goto free_buffers;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = rx_csum_offload(priv, fd);
/* Make sure forwarded skbs will have enough space
* on Tx, if extra headers are added.
@@ -2093,7 +2196,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
dma_addr_t addr = qm_fd_addr(fd);
enum qm_fd_format fd_format;
struct net_device *net_dev;
- u32 fd_status = fd->status;
+ u32 fd_status;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
unsigned int skb_len;
@@ -2350,6 +2453,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
.ndo_do_ioctl = dpaa_ioctl,
+ .ndo_setup_tc = dpaa_setup_tc,
};
static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2728,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
- &priv->buf_layout[0], dev);
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ &priv->buf_layout[0], dev);
+ if (err)
+ goto init_ports_failed;
priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
if (!priv->percpu_priv) {
@@ -2638,6 +2744,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
memset(percpu_priv, 0, sizeof(*percpu_priv));
}
+ priv->num_tc = 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
if (err < 0)
@@ -2658,6 +2767,7 @@ netdev_init_failed:
napi_add_failed:
dpaa_napi_del(net_dev);
alloc_percpu_failed:
+init_ports_failed:
dpaa_fq_free(dev, &priv->dpaa_fq_list);
fq_alloc_failed:
qman_delete_cgr_safe(&priv->ingress_cgr);
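
The new Tx scheduling above spreads the per-class Tx queues over work queues 6/2/1/0 from lowest to highest priority, DPAA_TC_TXQ_NUM queues per class. A small standalone sketch of that index-to-WQ mapping is below, using a fixed queue-per-class count in place of NR_CPUS.

#include <stdio.h>

#define TC_TXQ_NUM	8	/* stand-in for DPAA_TC_TXQ_NUM (NR_CPUS) */

/* Mirrors the FQ_TYPE_TX branch of dpaa_assign_wq() above. */
static int tx_fq_to_wq(int idx)
{
	switch (idx / TC_TXQ_NUM) {
	case 0: return 6;	/* low priority (best effort) */
	case 1: return 2;	/* medium priority */
	case 2: return 1;	/* high priority */
	case 3: return 0;	/* very high priority */
	default: return -1;	/* more queues than DPAA_ETH_TXQ_NUM */
	}
}

int main(void)
{
	int idx;

	for (idx = 0; idx < 4 * TC_TXQ_NUM; idx += TC_TXQ_NUM)
		printf("txq %2d -> wq %d\n", idx, tx_fq_to_wq(idx));
	return 0;
}
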
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 1f9aebf3f3c5..9941a7866ebe 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -39,7 +39,12 @@
#include "mac.h"
#include "dpaa_eth_trace.h"
-#define DPAA_ETH_TXQ_NUM NR_CPUS
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM 4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
#define DPAA_BPS_NUM 3 /* number of bpools per interface */
@@ -152,6 +157,7 @@ struct dpaa_priv {
u16 channel;
struct list_head dpaa_fq_list;
+ u8 num_tc;
u32 msg_enable; /* net_device message level */
struct {
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index f60845f0c6ca..4aefe2438969 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -59,6 +59,7 @@
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
+#define HWP_OFFSET 0x000C7000
#define CGP_OFFSET 0x000DB000
/* Exceptions bit map */
@@ -218,6 +219,9 @@
#define QMI_GS_HALT_NOT_BUSY 0x00000002
+/* HWP defines */
+#define HWP_RPIMAC_PEN 0x00000001
+
/* IRAM defines */
#define IRAM_IADD_AIE 0x80000000
#define IRAM_READY 0x80000000
@@ -475,6 +479,12 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};
+struct fman_hwp_regs {
+ u32 res0000[0x844 / 4]; /* 0x000..0x843 */
+ u32 fmprrpimac; /* FM Parser Internal memory access control */
+ u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */
+};
+
/* Structure that holds current FMan state.
* Used for saving run time information.
*/
@@ -606,6 +616,7 @@ struct fman {
struct fman_bmi_regs __iomem *bmi_regs;
struct fman_qmi_regs __iomem *qmi_regs;
struct fman_dma_regs __iomem *dma_regs;
+ struct fman_hwp_regs __iomem *hwp_regs;
fman_exceptions_cb *exception_cb;
fman_bus_error_cb *bus_error_cb;
/* Spinlock for FMan use */
@@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}
+static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
+{
+ /* enable HW Parser */
+ iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
+}
+
static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
@@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
state->max_num_of_open_dmas = 32;
state->fm_port_num_of_cg = 256;
state->num_of_rx_ports = 6;
- state->total_fifo_size = 122 * 1024;
+ state->total_fifo_size = 136 * 1024;
break;
case 2:
@@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman)
fman->bmi_regs = base_addr + BMI_OFFSET;
fman->qmi_regs = base_addr + QMI_OFFSET;
fman->dma_regs = base_addr + DMA_OFFSET;
+ fman->hwp_regs = base_addr + HWP_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
@@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);
+ /* Init HW Parser */
+ hwp_init(fman->hwp_regs);
+
err = enable(fman, cfg);
if (err != 0)
return err;
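
The new fman_hwp_regs layout above relies on reserved filler arrays to place the parser control register at its documented offset. The quick standalone check below verifies the arithmetic with offsetof(), using plain uint32_t in place of the kernel types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fman_hwp_regs {
	uint32_t res0000[0x844 / 4];		/* 0x000..0x843 */
	uint32_t fmprrpimac;			/* parser internal memory access control */
	uint32_t res[(0x1000 - 0x848) / 4];	/* 0x848..0xFFF */
};

int main(void)
{
	/* expect 0x844 and 0x1000 */
	printf("fmprrpimac at 0x%zx, block size 0x%zx\n",
	       offsetof(struct fman_hwp_regs, fmprrpimac),
	       sizeof(struct fman_hwp_regs));
	return 0;
}
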
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 57aae8d17d77..f53e1473dbcc 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -134,14 +134,14 @@ enum fman_exceptions {
struct fman_prs_result {
u8 lpid; /* Logical port id */
u8 shimr; /* Shim header result */
- u16 l2r; /* Layer 2 result */
- u16 l3r; /* Layer 3 result */
+ __be16 l2r; /* Layer 2 result */
+ __be16 l3r; /* Layer 3 result */
u8 l4r; /* Layer 4 result */
u8 cplan; /* Classification plan id */
- u16 nxthdr; /* Next Header */
- u16 cksum; /* Running-sum */
+ __be16 nxthdr; /* Next Header */
+ __be16 cksum; /* Running-sum */
/* Flags&fragment-offset field of the last IP-header */
- u16 flags_frag_off;
+ __be16 flags_frag_off;
/* Routing type field of a IPV6 routing extension header */
u8 route_type;
/* Routing Extension Header Present; last bit is IP valid */
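
Marking the multi-byte parse-result fields __be16 documents that the FMan writes them big-endian, so consumers convert before use. A minimal standalone illustration follows, with ntohs() standing in for be16_to_cpu() and an arbitrary example value.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t l3r_be = htons(0x0120);	/* value as the hardware stores it */
	uint16_t l3r = ntohs(l3r_be);		/* host order, as the driver needs it */

	printf("l3r = 0x%04x\n", l3r);
	return 0;
}
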
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 9f3bb50a2365..57bf44fa16a1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -62,6 +62,7 @@
#define BMI_PORT_REGS_OFFSET 0
#define QMI_PORT_REGS_OFFSET 0x400
+#define HWP_PORT_REGS_OFFSET 0x800
/* Default values */
#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
@@ -182,7 +183,7 @@
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
-
+#define NIA_ENG_HWP 0x00440000
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -317,6 +318,19 @@ struct fman_port_qmi_regs {
u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
};
+#define HWP_HXS_COUNT 16
+#define HWP_HXS_PHE_REPORT 0x00000800
+#define HWP_HXS_PCAC_PSTAT 0x00000100
+#define HWP_HXS_PCAC_PSTOP 0x00000001
+struct fman_port_hwp_regs {
+ struct {
+ u32 ssa; /* Soft Sequence Attachment */
+ u32 lcv; /* Line-up Enable Confirmation Mask */
+ } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
+ u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
+ u32 fmpr_pcac; /* Configuration Access Control */
+};
+
/* QMI dequeue prefetch modes */
enum fman_port_deq_prefetch {
FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -436,6 +450,7 @@ struct fman_port {
union fman_port_bmi_regs __iomem *bmi_regs;
struct fman_port_qmi_regs __iomem *qmi_regs;
+ struct fman_port_hwp_regs __iomem *hwp_regs;
struct fman_sp_buffer_offsets buffer_offsets;
@@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port)
/* NIA */
tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
- tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ tmp |= NIA_ENG_HWP;
iowrite32be(tmp, &regs->fmbm_rfne);
+ /* Parser Next Engine NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
+
/* Enqueue NIA */
iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
@@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port)
return 0;
}
+static void stop_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (!cnt)
+ pr_err("Timeout stopping HW Parser\n");
+}
+
+static void start_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(0, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (!cnt)
+ pr_err("Timeout starting HW Parser\n");
+}
+
+static void init_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int i;
+
+ stop_port_hwp(port);
+
+ for (i = 0; i < HWP_HXS_COUNT; i++) {
+ /* enable HXS error reporting into FD[STATUS] PHE */
+ iowrite32be(0x00000000, &regs->pmda[i].ssa);
+ iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+ }
+
+ start_port_hwp(port);
+}
+
static int init(struct fman_port *port)
{
int err;
@@ -673,6 +735,8 @@ static int init(struct fman_port *port)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
err = init_bmi_rx(port);
+ if (!err)
+ init_hwp(port);
break;
case FMAN_PORT_TYPE_TX:
err = init_bmi_tx(port);
@@ -686,7 +750,8 @@ static int init(struct fman_port *port)
/* Init QMI registers */
err = init_qmi(port);
- return err;
+ if (err)
+ return err;
return 0;
}
@@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* Allocate the FM driver's parameters structure */
port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
if (!port->cfg)
- goto err_params;
+ return -EINVAL;
/* Initialize FM port parameters which will be kept by the driver */
port->port_type = port->dts_params.type;
@@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* set memory map pointers */
port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+ port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
/* resource distribution. */
@@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
err_port_cfg:
kfree(port->cfg);
-err_params:
- kfree(port);
return -EINVAL;
}
EXPORT_SYMBOL(fman_port_config);
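
stop_port_hwp() and start_port_hwp() above poll a status bit with a bounded retry count. Note that with `while (cnt-- > 0 && ...)` the counter ends at -1 on timeout and at 0 or more on success, so a timeout test should check for a negative value rather than zero. A self-contained sketch of the bounded-poll pattern follows; poll_status() is only a placeholder for the ioread32be() status check.

#include <stdio.h>

/* poll_status() stands in for ioread32be(&regs->fmpr_pcac) &
 * HWP_HXS_PCAC_PSTAT; here it simply reports "busy" a fixed number of
 * times so the loop terminates.
 */
static int poll_status(void)
{
	static int busy = 5;

	return busy-- > 0;
}

int main(void)
{
	int cnt = 100;

	while (cnt-- > 0 && poll_status())
		;	/* udelay(10) in the driver */

	if (cnt < 0)	/* -1 only when the retries were exhausted */
		fprintf(stderr, "Timeout stopping HW Parser\n");
	else
		printf("parser stopped with %d retries left\n", cnt);
	return 0;
}
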
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 90fa5bf23d1b..0da0752fedef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -186,60 +186,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
}
}
-static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
+ supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+ supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
SUPPORTED_1000baseKX_Full :
SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_10_FULL)
- ecmd->supported |= SUPPORTED_10baseT_Full;
+ supported |= SUPPORTED_10baseT_Full;
/* default advertised speed if phy.autoneg_advertised isn't set */
- ecmd->advertising = ecmd->supported;
+ advertising = supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
- ecmd->advertising = 0;
+ advertising = 0;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
+ advertising |= supported & ADVRTSD_MSK_10G;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
- if (ecmd->supported & SUPPORTED_1000baseKX_Full)
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ if (supported & SUPPORTED_1000baseKX_Full)
+ advertising |= ADVERTISED_1000baseKX_Full;
else
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
} else {
if (hw->phy.multispeed_fiber && !autoneg) {
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
}
}
if (autoneg) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else
- ecmd->autoneg = AUTONEG_DISABLE;
-
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
@@ -248,14 +250,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_phy_qt:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
case ixgbe_phy_sfp_passive_tyco:
@@ -273,9 +275,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
@@ -285,102 +287,113 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_sfp_type_unknown:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
/* Indicate pause support */
- ecmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case ixgbe_fc_rx_pause:
- ecmd->advertising |= ADVERTISED_Pause |
+ advertising |= ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
break;
case ixgbe_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
+ advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
}
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case IXGBE_LINK_SPEED_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_5000);
+ cmd->base.speed = SPEED_5000;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_2500);
+ cmd->base.speed = SPEED_2500;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case IXGBE_LINK_SPEED_100_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case IXGBE_LINK_SPEED_10_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
s32 err = 0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -388,12 +401,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->advertising & ~ecmd->supported)
+ if (advertising & ~supported)
return -EINVAL;
/* only allow one speed at a time if no autoneg */
- if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
- if (ecmd->advertising ==
+ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
+ if (advertising ==
(ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full))
return -EINVAL;
@@ -401,16 +414,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
old = hw->phy.autoneg_advertised;
advertised = 0;
- if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+ if (advertising & ADVERTISED_10000baseT_Full)
advertised |= IXGBE_LINK_SPEED_10GB_FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ if (advertising & ADVERTISED_1000baseT_Full)
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
advertised |= IXGBE_LINK_SPEED_100_FULL;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
advertised |= IXGBE_LINK_SPEED_10_FULL;
if (old == advertised)
@@ -428,10 +441,11 @@ static int ixgbe_set_settings(struct net_device *netdev,
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
} else {
/* in this case we currently only support 10Gb/FULL */
- u32 speed = ethtool_cmd_speed(ecmd);
- if ((ecmd->autoneg == AUTONEG_ENABLE) ||
- (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ u32 speed = cmd->base.speed;
+
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+ (advertising != ADVERTISED_10000baseT_Full) ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -3402,8 +3416,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
- .get_settings = ixgbe_get_settings,
- .set_settings = ixgbe_set_settings,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
@@ -3442,6 +3454,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
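
The conversion to get_link_ksettings/set_link_ksettings keeps the driver's legacy SUPPORTED_*/ADVERTISED_* bit handling and translates at the boundary with ethtool_convert_link_mode_to_legacy_u32() and ethtool_convert_legacy_u32_to_link_mode(). The standalone sketch below models the idea with a toy three-word bitmap; it is not the kernel implementation, only the shape of the conversion.

#include <stdint.h>
#include <stdio.h>

#define LINK_MODE_WORDS 3	/* toy bitmap, wide enough for >32 link modes */

/* Legacy u32 -> bitmap: the first 32 link modes map 1:1 onto the old bits. */
static void legacy_to_link_mode(uint32_t legacy, uint32_t bitmap[LINK_MODE_WORDS])
{
	bitmap[0] = legacy;
	bitmap[1] = 0;
	bitmap[2] = 0;
}

/* Bitmap -> legacy u32: returns 0 if modes beyond the first 32 were lost. */
static int link_mode_to_legacy(const uint32_t bitmap[LINK_MODE_WORDS],
			       uint32_t *legacy)
{
	*legacy = bitmap[0];
	return bitmap[1] == 0 && bitmap[2] == 0;
}

int main(void)
{
	uint32_t modes[LINK_MODE_WORDS], legacy;

	legacy_to_link_mode(0x1020, modes);
	printf("round trip ok: %d, legacy 0x%x\n",
	       link_mode_to_legacy(modes, &legacy), legacy);
	return 0;
}
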
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index d2555e8b947e..da6fb825afea 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -82,13 +82,13 @@ config MVNETA_BM
that all dependencies are met.
config MVPP2
- tristate "Marvell Armada 375 network interface support"
+ tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA 375 SoC.
+ Marvell ARMADA 375, 7K and 8K SoCs.
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d00421b9ffea..af5bfa13d976 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -25,6 +25,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
@@ -49,9 +50,11 @@
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
-#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
-#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
@@ -99,6 +102,7 @@
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
@@ -117,9 +121,6 @@
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
-#define MVPP2_TXQ_THRESH_REG 0x2094
-#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
-#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
@@ -140,6 +141,7 @@
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
@@ -152,10 +154,52 @@
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
-#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
+
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
@@ -210,14 +254,19 @@
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
+#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
-#define MVPP2_BM_MC_RLS_REG 0x64c4
-#define MVPP2_BM_MC_ID_MASK 0xfff
-#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
+#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
+#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
@@ -287,6 +336,24 @@
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_CTRL_4_REG 0x90
+#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
+#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
+#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
+#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+
+/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL3_REG 0x11c
+#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+
+/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+#define MVPP22_SMI_MISC_CFG_REG 0x1204
+#define MVPP22_SMI_POLLING_EN BIT(10)
+
+#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
@@ -335,15 +402,9 @@
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
-/* Maximum number of RXQs used by single port */
-#define MVPP2_MAX_RXQ 8
-
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4
-/* Total number of RXQs available to all ports */
-#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
-
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128
@@ -615,6 +676,11 @@ enum mvpp2_prs_l3_cast {
*/
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
+#define MVPP21_ADDR_SPACE_SZ 0
+#define MVPP22_ADDR_SPACE_SZ SZ_64K
+
+#define MVPP2_MAX_CPUS 4
+
enum mvpp2_bm_type {
MVPP2_BM_FREE,
MVPP2_BM_SWF_LONG,
@@ -626,12 +692,19 @@ enum mvpp2_bm_type {
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
- void __iomem *base;
void __iomem *lms_base;
+ void __iomem *iface_base;
+
+ /* On PPv2.2, each CPU can access the base register through a
+ * separate address space, each 64 KB apart from each
+ * other.
+ */
+ void __iomem *cpu_base[MVPP2_MAX_CPUS];
/* Common clocks */
struct clk *pp_clk;
struct clk *gop_clk;
+ struct clk *mg_clk;
/* List of pointers to port structures */
struct mvpp2_port **port_list;
@@ -649,6 +722,12 @@ struct mvpp2 {
/* Tclk value */
u32 tclk;
+
+ /* HW version */
+ enum { MVPP21, MVPP22 } hw_version;
+
+ /* Maximum number of RXQs per port */
+ unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
@@ -670,6 +749,11 @@ struct mvpp2_port_pcpu {
struct mvpp2_port {
u8 id;
+ /* Index of the port from the "group of ports" complex point
+ * of view
+ */
+ int gop_id;
+
int irq;
struct mvpp2 *priv;
@@ -741,22 +825,24 @@ struct mvpp2_port {
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
-struct mvpp2_tx_desc {
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* physical addr of transmitted buffer */
+ u32 buf_dma_addr; /* physical addr of transmitted buffer */
u32 buf_cookie; /* cookie for access to TX buffer in tx path */
u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
u32 reserved2; /* reserved (for future use) */
};
-struct mvpp2_rx_desc {
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
u32 status; /* info about received packet */
u16 reserved1; /* parser_info (for future use, PnC) */
u16 data_size; /* size of received packet in bytes */
- u32 buf_phys_addr; /* physical address of the buffer */
+ u32 buf_dma_addr; /* physical address of the buffer */
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved2; /* gem_port_id (for future use, PON) */
u16 reserved3; /* csum_l4 (for future use, PnC) */
@@ -767,12 +853,51 @@ struct mvpp2_rx_desc {
u32 reserved8;
};
+/* HW TX descriptor for PPv2.2 */
+struct mvpp22_tx_desc {
+ u32 command;
+ u8 packet_offset;
+ u8 phys_txq;
+ u16 data_size;
+ u64 reserved1;
+ u64 buf_dma_addr_ptp;
+ u64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 */
+struct mvpp22_rx_desc {
+ u32 status;
+ u16 reserved1;
+ u16 data_size;
+ u32 reserved2;
+ u32 reserved3;
+ u64 buf_dma_addr_key_hash;
+ u64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+ union {
+ struct mvpp21_tx_desc pp21;
+ struct mvpp22_tx_desc pp22;
+ };
+};
+
+struct mvpp2_rx_desc {
+ union {
+ struct mvpp21_rx_desc pp21;
+ struct mvpp22_rx_desc pp22;
+ };
+};
+
struct mvpp2_txq_pcpu_buf {
/* Transmitted SKB */
struct sk_buff *skb;
/* Physical address of transmitted buffer */
- dma_addr_t phys;
+ dma_addr_t dma;
/* Size transmitted */
size_t size;
@@ -825,7 +950,7 @@ struct mvpp2_tx_queue {
struct mvpp2_tx_desc *descs;
/* DMA address of the Tx DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last Tx DMA descriptor */
int last_desc;
@@ -848,7 +973,7 @@ struct mvpp2_rx_queue {
struct mvpp2_rx_desc *descs;
/* DMA address of the RX DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last RX DMA descriptor */
int last_desc;
@@ -912,6 +1037,8 @@ struct mvpp2_bm_pool {
/* Buffer Pointers Pool External (BPPE) size */
int size;
+ /* BPPE size in bytes */
+ int size_bytes;
/* Number of buffers for this pool */
int buf_num;
/* Pool buffer size */
@@ -922,29 +1049,13 @@ struct mvpp2_bm_pool {
/* BPPE virtual base address */
u32 *virt_addr;
- /* BPPE physical base address */
- dma_addr_t phys_addr;
+ /* BPPE DMA base address */
+ dma_addr_t dma_addr;
/* Ports using BM pool */
u32 port_map;
};
-struct mvpp2_buff_hdr {
- u32 next_buff_phys_addr;
- u32 next_buff_virt_addr;
- u16 byte_count;
- u16 info;
- u8 reserved1; /* bm_qset (for future use, BM) */
-};
-
-/* Buffer header info bits */
-#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
-#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
-#define MVPP2_B_HDR_INFO_LAST_OFFS 12
-#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
-#define MVPP2_B_HDR_INFO_IS_LAST(info) \
- ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
-
/* Static declarations */
/* Number of RXQs used by single port */
@@ -959,12 +1070,177 @@ static int txq_number = MVPP2_MAX_TXQ;
static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
- writel(data, priv->base + offset);
+ writel(data, priv->cpu_base[0] + offset);
}
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
- return readl(priv->base + offset);
+ return readl(priv->cpu_base[0] + offset);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-CPU registers, where each CPU has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP2_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific CPU
+ * window, because they are related to an access to a per-CPU
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ */
+static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+ u32 offset, u32 data)
+{
+ writel(data, priv->cpu_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+ u32 offset)
+{
+ return readl(priv->cpu_base[cpu] + offset);
+}
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.buf_dma_addr;
+ else
+ return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ dma_addr_t dma_addr)
+{
+ if (port->priv->hw_version == MVPP21) {
+ tx_desc->pp21.buf_dma_addr = dma_addr;
+ } else {
+ u64 val = (u64)dma_addr;
+
+ tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+ tx_desc->pp22.buf_dma_addr_ptp |= val;
+ }
+}
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.data_size;
+ else
+ return tx_desc->pp22.data_size;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ size_t size)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.data_size = size;
+ else
+ tx_desc->pp22.data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int txq)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.phys_txq = txq;
+ else
+ tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int command)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.command = command;
+ else
+ tx_desc->pp22.command = command;
+}
+
+static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int offset)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.packet_offset = offset;
+ else
+ tx_desc->pp22.packet_offset = offset;
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.packet_offset;
+ else
+ return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_dma_addr;
+ else
+ return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_cookie;
+ else
+ return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.data_size;
+ else
+ return rx_desc->pp22.data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.status;
+ else
+ return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -974,15 +1250,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
txq_pcpu->txq_get_index = 0;
}
-static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc)
{
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_put_index;
tx_buf->skb = skb;
- tx_buf->size = tx_desc->data_size;
- tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+ tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+ tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+ mvpp2_txdesc_offset_get(port, tx_desc);
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3378,27 +3656,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool, int size)
{
- int size_bytes;
u32 val;
- size_bytes = sizeof(u32) * size;
- bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
- &bm_pool->phys_addr,
+ /* Number of buffer pointers must be a multiple of 16, as per
+ * hardware constraints
+ */
+ if (!IS_ALIGNED(size, 16))
+ return -EINVAL;
+
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ * bytes per buffer pointer
+ */
+ if (priv->hw_version == MVPP21)
+ bm_pool->size_bytes = 2 * sizeof(u32) * size;
+ else
+ bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+ &bm_pool->dma_addr,
GFP_KERNEL);
if (!bm_pool->virt_addr)
return -ENOMEM;
if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
- bm_pool->phys_addr);
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ bm_pool->virt_addr, bm_pool->dma_addr);
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
- bm_pool->phys_addr);
+ lower_32_bits(bm_pool->dma_addr));
mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
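/* Illustrative arithmetic, not part of the patch: the size_bytes computation
 * above assumes each BM pool entry holds two words (the buffer DMA address
 * and its cookie), hence the factor of two. For a hypothetical pool of 1024
 * buffer pointers:
 *
 *    PPv2.1: 2 * sizeof(u32) * 1024 =  8192 bytes ( 8 KiB of coherent memory)
 *    PPv2.2: 2 * sizeof(u64) * 1024 = 16384 bytes (16 KiB of coherent memory)
 */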
@@ -3426,6 +3716,34 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *dma_addr,
+ phys_addr_t *phys_addr)
+{
+ int cpu = smp_processor_id();
+
+ *dma_addr = mvpp2_percpu_read(priv, cpu,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+
+ if (priv->hw_version == MVPP22) {
+ u32 val;
+ u32 dma_addr_highbits, phys_addr_highbits;
+
+ val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+ dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+ phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+ MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+ if (sizeof(dma_addr_t) == 8)
+ *dma_addr |= (u64)dma_addr_highbits << 32;
+
+ if (sizeof(phys_addr_t) == 8)
+ *phys_addr |= (u64)phys_addr_highbits << 32;
+ }
+}
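/* Illustrative sketch, not part of the patch: assuming the PHYS mask selects
 * the low byte of MVPP22_BM_ADDR_HIGH_ALLOC and the VIRT mask the next byte,
 * a read-back value of 0x0000a1b2 decodes in the helper above as
 *
 *    dma_addr  |= (u64)0xb2 << 32;    bits [39:32] of the buffer DMA address
 *    phys_addr |= (u64)0xa1 << 32;    bits [39:32] of the buffer cookie
 */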
+
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
@@ -3433,21 +3751,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
int i;
for (i = 0; i < bm_pool->buf_num; i++) {
- dma_addr_t buf_phys_addr;
- unsigned long vaddr;
+ dma_addr_t buf_dma_addr;
+ phys_addr_t buf_phys_addr;
+ void *data;
- /* Get buffer virtual address (indirect access) */
- buf_phys_addr = mvpp2_read(priv,
- MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+ mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+ &buf_dma_addr, &buf_phys_addr);
- dma_unmap_single(dev, buf_phys_addr,
+ dma_unmap_single(dev, buf_dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE);
- if (!vaddr)
+ data = (void *)phys_to_virt(buf_phys_addr);
+ if (!data)
break;
- mvpp2_frag_free(bm_pool, (void *)vaddr);
+ mvpp2_frag_free(bm_pool, data);
}
/* Update BM driver with number of buffers removed from pool */
@@ -3471,9 +3789,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
- dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr,
- bm_pool->phys_addr);
+ bm_pool->dma_addr);
return 0;
}
@@ -3529,17 +3847,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
{
- u32 val;
+ u32 val, mask;
int prxq;
/* Get queue physical ID */
prxq = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_POOL_LONG_MASK;
- val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
- MVPP2_RXQ_POOL_LONG_MASK);
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_LONG_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_LONG_MASK;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
@@ -3547,40 +3868,45 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
int lrxq, int short_pool)
{
- u32 val;
+ u32 val, mask;
int prxq;
/* Get queue physical ID */
prxq = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
- val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
- MVPP2_RXQ_POOL_SHORT_MASK);
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_SHORT_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_SHORT_MASK;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *buf_phys_addr,
+ dma_addr_t *buf_dma_addr,
+ phys_addr_t *buf_phys_addr,
gfp_t gfp_mask)
{
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
void *data;
data = mvpp2_frag_alloc(bm_pool);
if (!data)
return NULL;
- phys_addr = dma_map_single(port->dev->dev.parent, data,
- MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+ dma_addr = dma_map_single(port->dev->dev.parent, data,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
mvpp2_frag_free(bm_pool, data);
return NULL;
}
- *buf_phys_addr = phys_addr;
+ *buf_dma_addr = dma_addr;
+ *buf_phys_addr = virt_to_phys(data);
return data;
}
@@ -3604,37 +3930,46 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_phys_addr,
- unsigned long buf_virt_addr)
+ dma_addr_t buf_dma_addr,
+ phys_addr_t buf_phys_addr)
{
- mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
- mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
-}
+ int cpu = smp_processor_id();
-/* Release multicast buffer */
-static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_phys_addr,
- unsigned long buf_virt_addr,
- int mc_id)
-{
- u32 val = 0;
+ if (port->priv->hw_version == MVPP22) {
+ u32 val = 0;
+
+ if (sizeof(dma_addr_t) == 8)
+ val |= upper_32_bits(buf_dma_addr) &
+ MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
+
+ if (sizeof(phys_addr_t) == 8)
+ val |= (upper_32_bits(buf_phys_addr)
+ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+ MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- val |= (mc_id & MVPP2_BM_MC_ID_MASK);
- mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+ }
- mvpp2_bm_pool_put(port, pool,
- buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
- buf_virt_addr);
+ /* MVPP2_BM_VIRT_RLS_REG is not interpreted by the HW, and is
+ * simply returned in the "cookie" field of the RX descriptor.
+ * Instead of storing the virtual address, we store the
+ * physical address.
+ */
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
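/* Illustrative sketch, not part of the patch: on the RX side the cookie comes
 * back untouched in the descriptor, so, given a port and a just-received
 * rx_desc, the buffer virtual address is recovered from the stored physical
 * address, exactly as mvpp2_rx() does further down:
 *
 *    phys_addr_t phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
 *    void *data = phys_to_virt(phys_addr);
 */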
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
- dma_addr_t phys_addr,
- unsigned long cookie)
+ dma_addr_t dma_addr,
+ phys_addr_t phys_addr)
{
int pool = mvpp2_bm_cookie_pool_get(bm);
- mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
@@ -3642,7 +3977,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
{
int i, buf_size, total_size;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
void *buf;
buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
@@ -3657,12 +3993,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
}
for (i = 0; i < buf_num; i++) {
- buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+ &phys_addr, GFP_KERNEL);
if (!buf)
break;
- mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
- (unsigned long)buf);
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+ phys_addr);
}
/* Update BM driver with number of buffers added to pool */
@@ -3830,7 +4167,8 @@ static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
/* Unmask the current CPU's Rx/Tx interrupts */
@@ -3838,17 +4176,46 @@ static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
- (MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id),
+ (MVPP2_CAUSE_MISC_SUM_MASK |
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
/* Port configuration routines */
+static void mvpp22_port_mii_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+ writel(val, port->base + MVPP22_XLG_CTRL3_REG);
+ }
+
+ val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+ if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ else
+ val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ val |= MVPP22_CTRL4_SYNC_BYPASS;
+ val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+}
+
static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->priv->hw_version == MVPP22)
+ mvpp22_port_mii_set(port);
+
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
switch (port->phy_interface) {
@@ -3952,16 +4319,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
{
int tx_port_num, val, queue, ptxq, lrxq;
- /* Configure port to loopback if needed */
- if (port->flags & MVPP2_F_LOOPBACK)
- mvpp2_port_loopback_set(port);
+ if (port->priv->hw_version == MVPP21) {
+ /* Configure port to loopback if needed */
+ if (port->flags & MVPP2_F_LOOPBACK)
+ mvpp2_port_loopback_set(port);
- /* Update TX FIFO MIN Threshold */
- val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
- /* Min. TX threshold must be less than minimal packet length */
- val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
- writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ /* Min. TX threshold must be less than minimal packet length */
+ val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ }
/* Disable Legacy WRR, Disable EJP, Release from reset */
tx_port_num = mvpp2_egress_port(port);
@@ -4149,11 +4518,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
}
/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
{
- int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
int cpu = smp_processor_id();
+ int pool;
+
+ pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
+ MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
@@ -4161,18 +4534,6 @@ static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
/* Tx descriptors helper methods */
-/* Get number of Tx descriptors waiting to be transmitted by HW */
-static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- u32 val;
-
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
-
- return val & MVPP2_TXQ_PENDING_MASK;
-}
-
/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
@@ -4187,7 +4548,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -4216,11 +4578,12 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
struct mvpp2_tx_queue *txq, int num)
{
u32 val;
+ int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+ mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+ val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
@@ -4321,7 +4684,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
u32 val;
/* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+ val = mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -4335,7 +4699,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
for (queue = 0; queue < txq_number; queue++) {
int id = port->txqs[queue]->id;
- mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+ mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(id));
}
}
@@ -4394,12 +4759,14 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ int cpu = smp_processor_id();
+
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
- rxq->pkts_coal);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4449,7 +4816,7 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@@ -4527,10 +4894,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
int desc_num, int cpu,
struct mvpp2 *priv)
{
+ u32 txq_dma;
+
/* Allocate memory for TX descriptors */
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
desc_num * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_phys, GFP_KERNEL);
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
@@ -4540,10 +4909,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
aggr_txq->next_desc_to_proc = mvpp2_read(priv,
MVPP2_AGGR_TXQ_INDEX_REG(cpu));
- /* Set Tx descriptors queue starting address */
- /* indirect access */
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
- aggr_txq->descs_phys);
+ /* Set Tx descriptors queue starting address - indirect
+ * access
+ */
+ if (priv->hw_version == MVPP21)
+ txq_dma = aggr_txq->descs_dma;
+ else
+ txq_dma = aggr_txq->descs_dma >>
+ MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
return 0;
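/* Illustrative sketch, not part of the patch: on PPv2.2 a 32-bit register
 * cannot hold the full bus address, so the base is assumed to be programmed
 * shifted right by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS, the dropped low bits being
 * implied by the descriptor ring's alignment:
 *
 *    u32 reg = (u32)(aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS);
 */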
@@ -4554,12 +4929,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ u32 rxq_dma;
+ int cpu;
+
rxq->size = port->rx_ring_size;
/* Allocate memory for RX descriptors */
rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
- &rxq->descs_phys, GFP_KERNEL);
+ &rxq->descs_dma, GFP_KERNEL);
if (!rxq->descs)
return -ENOMEM;
@@ -4569,10 +4947,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
- mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ if (port->priv->hw_version == MVPP21)
+ rxq_dma = rxq->descs_dma;
+ else
+ rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
/* Set Offset */
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4599,10 +4982,11 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
for (i = 0; i < rx_received; i++) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- u32 bm = mvpp2_bm_cookie_build(rx_desc);
+ u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm,
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+ mvpp2_rxdesc_cookie_get(port, rx_desc));
}
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
@@ -4611,26 +4995,29 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ int cpu;
+
mvpp2_rxq_drop_pkts(port, rxq);
if (rxq->descs)
dma_free_coherent(port->dev->dev.parent,
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
rxq->descs,
- rxq->descs_phys);
+ rxq->descs_dma);
rxq->descs = NULL;
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
- rxq->descs_phys = 0;
+ rxq->descs_dma = 0;
/* Clear Rx descriptors queue starting address and size;
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
@@ -4646,23 +5033,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
/* Allocate memory for Tx descriptors */
txq->descs = dma_alloc_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- &txq->descs_phys, GFP_KERNEL);
+ &txq->descs_dma, GFP_KERNEL);
if (!txq->descs)
return -ENOMEM;
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
- MVPP2_TXQ_DESC_SIZE_MASK);
- mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
- mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
- txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+ txq->descs_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+ txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+ txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
val &= ~MVPP2_TXQ_PENDING_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors
* for each existing TXQ.
@@ -4673,9 +5062,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
(txq->log_id * desc_per_txq);
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
- MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
- MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+ MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
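/* Illustrative arithmetic, not part of the patch: assuming MVPP2_MAX_TXQ is 8
 * and with desc_per_txq = 16, port 1 / logical queue 3 would get
 *
 *    desc = 1 * 8 * 16 + 3 * 16 = 176
 *
 * as its base slot in the shared prefetch buffer, with a threshold of 8.
 */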
/* WRR / EJP configuration - indirect access */
tx_port_num = mvpp2_egress_port(port);
@@ -4716,7 +5105,7 @@ error:
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ txq->descs, txq->descs_dma);
return -ENOMEM;
}
@@ -4736,20 +5125,21 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
if (txq->descs)
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ txq->descs, txq->descs_dma);
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
- txq->descs_phys = 0;
+ txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
@@ -4759,10 +5149,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
int delay, pending, cpu;
u32 val;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
val |= MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
@@ -4778,11 +5169,13 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
mdelay(1);
delay++;
- pending = mvpp2_txq_pend_desc_num_get(port, txq);
+ pending = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_TXQ_PENDING_REG);
+ pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -4991,20 +5384,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
static void mvpp2_rx_error(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- u32 status = rx_desc->status;
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
case MVPP2_RXD_ERR_CRC:
- netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_OVERRUN:
- netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_RESOURCE:
- netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
+ status, sz);
break;
}
}
@@ -5031,15 +5425,17 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, u32 bm)
{
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
void *buf;
/* No recycle or too many buffers are in use, so allocate a new skb */
- buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+ GFP_ATOMIC);
if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
+ mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
return 0;
}
@@ -5075,43 +5471,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
-static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- struct mvpp2_buff_hdr *buff_hdr;
- struct sk_buff *skb;
- u32 rx_status = rx_desc->status;
- dma_addr_t buff_phys_addr;
- unsigned long buff_virt_addr;
- dma_addr_t buff_phys_addr_next;
- unsigned long buff_virt_addr_next;
- int mc_id;
- int pool_id;
-
- pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
- buff_phys_addr = rx_desc->buf_phys_addr;
- buff_virt_addr = rx_desc->buf_cookie;
-
- do {
- skb = (struct sk_buff *)buff_virt_addr;
- buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
-
- mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
-
- buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
- buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
-
- /* Release buffer */
- mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
- buff_virt_addr, mc_id);
-
- buff_phys_addr = buff_phys_addr_next;
- buff_virt_addr = buff_virt_addr_next;
-
- } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
-}
-
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_queue *rxq)
@@ -5132,25 +5491,23 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
unsigned int frag_size;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
void *data;
rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
- phys_addr = rx_desc->buf_phys_addr;
- data = (void *)(uintptr_t)rx_desc->buf_cookie;
-
- bm = mvpp2_bm_cookie_build(rx_desc);
+ rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+ rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+ rx_bytes -= MVPP2_MH_SIZE;
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+
+ bm = mvpp2_bm_cookie_build(port, rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
bm_pool = &port->priv->bm_pools[pool];
- /* Check if buffer header is used */
- if (rx_status & MVPP2_RXD_BUF_HDR) {
- mvpp2_buff_hdr_rx(port, rx_desc);
- continue;
- }
/* In case of an error, release the requested buffer pointer
* to the Buffer Manager. This request process is controlled
@@ -5162,9 +5519,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
-
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
continue;
}
@@ -5185,7 +5540,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, phys_addr,
+ dma_unmap_single(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE);
rcvd_pkts++;
@@ -5216,11 +5571,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
}
static inline void
-tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
- dma_unmap_single(dev, desc->buf_phys_addr,
- desc->data_size, DMA_TO_DEVICE);
+ dma_addr_t buf_dma_addr =
+ mvpp2_txdesc_dma_addr_get(port, desc);
+ size_t buf_sz =
+ mvpp2_txdesc_size_get(port, desc);
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@@ -5232,35 +5591,38 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
struct mvpp2_tx_desc *tx_desc;
int i;
- dma_addr_t buf_phys_addr;
+ dma_addr_t buf_dma_addr;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = frag->size;
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, frag->size);
- buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
- tx_desc->data_size,
+ buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+ frag->size,
DMA_TO_DEVICE);
- if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+ if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto error;
}
- tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
- tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc,
+ MVPP2_TXD_L_DESC);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
- tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
}
@@ -5272,7 +5634,7 @@ error:
*/
for (i = i - 1; i >= 0; i--) {
tx_desc = txq->descs + i;
- tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+ tx_desc_unmap_put(port, txq, tx_desc);
}
return -ENOMEM;
@@ -5285,7 +5647,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
struct mvpp2_tx_queue *txq, *aggr_txq;
struct mvpp2_txq_pcpu *txq_pcpu;
struct mvpp2_tx_desc *tx_desc;
- dma_addr_t buf_phys_addr;
+ dma_addr_t buf_dma_addr;
int frags = 0;
u16 txq_id;
u32 tx_cmd;
@@ -5307,35 +5669,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = skb_headlen(skb);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
- buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
- tx_desc->data_size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+ buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
mvpp2_txq_desc_put(txq);
frags = 0;
goto out;
}
- tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
tx_cmd = mvpp2_skb_tx_csum(port, skb);
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
- tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
- tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
- tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+ tx_desc_unmap_put(port, txq, tx_desc);
frags = 0;
goto out;
}
@@ -5396,6 +5761,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx, cause_rx, cause_misc;
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
+ int cpu = smp_processor_id();
/* Rx/Tx cause register
*
@@ -5407,8 +5773,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
*
* Each CPU has its own Rx/Tx cause register
*/
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+ cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
@@ -5417,8 +5783,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Clear the cause register */
mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
- cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+ cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
@@ -5530,7 +5897,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
return 0;
}
-static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
u32 mac_addr_l, mac_addr_m, mac_addr_h;
@@ -5975,16 +6342,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-/* Driver initialization */
-
-static void mvpp2_port_power_up(struct mvpp2_port *port)
-{
- mvpp2_port_mii_set(port);
- mvpp2_port_periodic_xon_disable(port);
- mvpp2_port_fc_adv_enable(port);
- mvpp2_port_reset(port);
-}
-
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
@@ -5993,7 +6350,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2_txq_pcpu *txq_pcpu;
int queue, cpu, err;
- if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+ if (port->first_rxq + rxq_number >
+ MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
/* Disable port */
@@ -6061,7 +6419,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
}
/* Configure Rx queue group interrupt for this port */
- mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+ rxq_number);
+ } else {
+ u32 val;
+
+ val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
/* Create Rx descriptor rings */
for (queue = 0; queue < rxq_number; queue++) {
@@ -6103,8 +6472,7 @@ err_free_percpu:
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node,
- struct mvpp2 *priv,
- int *next_first_rxq)
+ struct mvpp2 *priv)
{
struct device_node *phy_node;
struct mvpp2_port *port;
@@ -6117,7 +6485,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
u32 id;
int features;
int phy_mode;
- int priv_common_regs_num = 2;
int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
@@ -6163,16 +6530,30 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->priv = priv;
port->id = id;
- port->first_rxq = *next_first_rxq;
+ if (priv->hw_version == MVPP21)
+ port->first_rxq = port->id * rxq_number;
+ else
+ port->first_rxq = port->id * priv->max_port_rxqs;
+
port->phy_node = phy_node;
port->phy_interface = phy_mode;
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- priv_common_regs_num + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(port->base)) {
- err = PTR_ERR(port->base);
- goto err_free_irq;
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+ port->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->base)) {
+ err = PTR_ERR(port->base);
+ goto err_free_irq;
+ }
+ } else {
+ if (of_property_read_u32(port_node, "gop-port-id",
+ &port->gop_id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing gop-port-id value\n");
+ goto err_free_irq;
+ }
+
+ port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
}
/* Alloc per-cpu stats */
@@ -6187,7 +6568,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mac_from = "device tree";
ether_addr_copy(dev->dev_addr, dt_mac_addr);
} else {
- mvpp2_get_mac_address(port, hw_mac_addr);
+ if (priv->hw_version == MVPP21)
+ mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
ether_addr_copy(dev->dev_addr, hw_mac_addr);
@@ -6207,7 +6589,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to init port %d\n", id);
goto err_free_stats;
}
- mvpp2_port_power_up(port);
+
+ mvpp2_port_mii_set(port);
+ mvpp2_port_periodic_xon_disable(port);
+
+ if (priv->hw_version == MVPP21)
+ mvpp2_port_fc_adv_enable(port);
+
+ mvpp2_port_reset(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
@@ -6245,8 +6634,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- /* Increment the first Rx queue number to be used by the next port */
- *next_first_rxq += rxq_number;
priv->port_list[id] = port;
return 0;
@@ -6330,6 +6717,60 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+ u32 val, rdval, wrval;
+
+ mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+ /* AXI Bridge Configuration */
+
+ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ /* BM */
+ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+ /* Descriptors */
+ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+ /* Buffer Data */
+ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+ val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+ mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+ mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
@@ -6338,7 +6779,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
u32 val;
/* Checks for hardware constraints */
- if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+ if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
(txq_number > MVPP2_MAX_TXQ)) {
dev_err(&pdev->dev, "invalid queue size parameter\n");
return -EINVAL;
@@ -6349,10 +6790,19 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
+ if (priv->hw_version == MVPP22)
+ mvpp2_axi_init(priv);
+
/* Disable HW PHY polling */
- val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
- val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
- writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ if (priv->hw_version == MVPP21) {
+ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+ writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ } else {
+ val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ val &= ~MVPP22_SMI_POLLING_EN;
+ writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ }
/* Allocate and initialize aggregated TXQs */
priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
@@ -6374,11 +6824,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_rx_fifo_init(priv);
/* Reset Rx queue group interrupt configuration */
- for (i = 0; i < MVPP2_MAX_PORTS; i++)
- mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
+ rxq_number);
+ continue;
+ } else {
+ u32 val;
+
+ val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
- writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
- priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+ val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
+ }
+
+ if (priv->hw_version == MVPP21)
+ writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+ priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
/* Allow cache snoop when transmitting packets */
mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
@@ -6405,22 +6869,46 @@ static int mvpp2_probe(struct platform_device *pdev)
struct device_node *port_node;
struct mvpp2 *priv;
struct resource *res;
- int port_count, first_rxq;
+ void __iomem *base;
+ int port_count, cpu;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->lms_base))
+ return PTR_ERR(priv->lms_base);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iface_base))
+ return PTR_ERR(priv->iface_base);
+ }
+
+ for_each_present_cpu(cpu) {
+ u32 addr_space_sz;
+
+ addr_space_sz = (priv->hw_version == MVPP21 ?
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+ priv->cpu_base[cpu] = base + cpu * addr_space_sz;
+ }
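/* Illustrative sketch, not part of the patch: assuming MVPP21_ADDR_SPACE_SZ
 * is 0 and MVPP22_ADDR_SPACE_SZ is 64 KiB, the per-CPU bases computed above
 * tile as
 *
 *    PPv2.1: cpu_base[n] == base for every n (one shared register window)
 *    PPv2.2: cpu_base[n] == base + n * 0x10000 (one 64 KiB window per CPU)
 *
 * which is why mvpp2_read()/mvpp2_write() can simply use cpu_base[0].
 */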
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->lms_base))
- return PTR_ERR(priv->lms_base);
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
if (IS_ERR(priv->pp_clk))
@@ -6438,21 +6926,47 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
+
/* Get system's tclk rate */
priv->tclk = clk_get_rate(priv->pp_clk);
+ if (priv->hw_version == MVPP22) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto err_mg_clk;
+ /* Sadly, the BM pools all share the same register to
+ * store the high 32 bits of their address. So they
+ * must all have the same high 32 bits, which forces
+ * us to restrict coherent memory to DMA_BIT_MASK(32).
+ */
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_mg_clk;
+ }
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
dev_err(&pdev->dev, "failed to initialize controller\n");
- goto err_gop_clk;
+ goto err_mg_clk;
}
port_count = of_get_available_child_count(dn);
if (port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
- goto err_gop_clk;
+ goto err_mg_clk;
}
priv->port_list = devm_kcalloc(&pdev->dev, port_count,
@@ -6460,20 +6974,22 @@ static int mvpp2_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!priv->port_list) {
err = -ENOMEM;
- goto err_gop_clk;
+ goto err_mg_clk;
}
/* Initialize ports */
- first_rxq = 0;
for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+ err = mvpp2_port_probe(pdev, port_node, priv);
if (err < 0)
- goto err_gop_clk;
+ goto err_mg_clk;
}
platform_set_drvdata(pdev, priv);
return 0;
+err_mg_clk:
+ if (priv->hw_version == MVPP22)
+ clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
@@ -6506,9 +7022,10 @@ static int mvpp2_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
aggr_txq->descs,
- aggr_txq->descs_phys);
+ aggr_txq->descs_dma);
}
+ clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
@@ -6516,7 +7033,14 @@ static int mvpp2_remove(struct platform_device *pdev)
}
static const struct of_device_id mvpp2_match[] = {
- { .compatible = "marvell,armada-375-pp2" },
+ {
+ .compatible = "marvell,armada-375-pp2",
+ .data = (void *)MVPP21,
+ },
+ {
+ .compatible = "marvell,armada-7k-pp22",
+ .data = (void *)MVPP22,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
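/* Illustrative sketch, not part of the patch: of_device_get_match_data()
 * returns the .data pointer of the matching entry above, so the probe path
 * ends up with hw_version == MVPP22 for a node declaring
 * compatible = "marvell,armada-7k-pp22":
 *
 *    priv->hw_version = (unsigned long)of_device_get_match_data(&pdev->dev);
 */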
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9e757684816d..bf6317eca2f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1908,10 +1908,9 @@ static int __init mtk_init(struct net_device *dev)
/* If the mac address is invalid, use random mac address */
if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
dev_err(eth->dev, "generated random MAC address %pM\n",
dev->dev_addr);
- dev->addr_assign_type = NET_ADDR_RANDOM;
}
return mtk_phy_connect(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c4d714fcc7da..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -117,7 +117,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* port statistics */
"tso_packets",
"xmit_more",
- "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+ "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* pf statistics */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 9166d90e7328..e0eb695318e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -213,6 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
+ priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
@@ -223,6 +224,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+ priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 867292880c07..aa074e57ce06 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -50,173 +50,62 @@
#include "mlx4_en.h"
-static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *page_alloc,
- const struct mlx4_en_frag_info *frag_info,
- gfp_t _gfp)
+static int mlx4_alloc_page(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frag,
+ gfp_t gfp)
{
- int order;
struct page *page;
dma_addr_t dma;
- for (order = frag_info->order; ;) {
- gfp_t gfp = _gfp;
-
- if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
- page = alloc_pages(gfp, order);
- if (likely(page))
- break;
- if (--order < 0 ||
- ((PAGE_SIZE << order) < frag_info->frag_size))
- return -ENOMEM;
- }
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
- frag_info->dma_dir);
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ return -ENOMEM;
+ dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- put_page(page);
+ __free_page(page);
return -ENOMEM;
}
- page_alloc->page_size = PAGE_SIZE << order;
- page_alloc->page = page;
- page_alloc->dma = dma;
- page_alloc->page_offset = 0;
- /* Not doing get_page() for each frag is a big win
- * on asymetric workloads. Note we can not use atomic_set().
- */
- page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
+ frag->page = page;
+ frag->dma = dma;
+ frag->page_offset = priv->rx_headroom;
return 0;
}
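/* Illustrative sketch, not part of the patch: the new one-page-per-frag life
 * cycle, using a hypothetical frag on the stack together with the
 * mlx4_en_free_frag() helper reworked further down:
 *
 *    struct mlx4_en_rx_alloc frag = {};
 *
 *    if (!mlx4_alloc_page(priv, &frag, GFP_KERNEL))
 *        mlx4_en_free_frag(priv, &frag);
 *
 * where the free helper both unmaps the page and clears the frag.
 */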
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
- struct mlx4_en_rx_alloc *ring_alloc,
gfp_t gfp)
{
- struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
- const struct mlx4_en_frag_info *frag_info;
- struct page *page;
int i;
- for (i = 0; i < priv->num_frags; i++) {
- frag_info = &priv->frag_info[i];
- page_alloc[i] = ring_alloc[i];
- page_alloc[i].page_offset += frag_info->frag_stride;
-
- if (page_alloc[i].page_offset + frag_info->frag_stride <=
- ring_alloc[i].page_size)
- continue;
-
- if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
- frag_info, gfp)))
- goto out;
- }
-
- for (i = 0; i < priv->num_frags; i++) {
- frags[i] = ring_alloc[i];
- frags[i].page_offset += priv->frag_info[i].rx_headroom;
- rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
- frags[i].page_offset);
- ring_alloc[i] = page_alloc[i];
- }
-
- return 0;
-
-out:
- while (i--) {
- if (page_alloc[i].page != ring_alloc[i].page) {
- dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].page_size,
- priv->frag_info[i].dma_dir);
- page = page_alloc[i].page;
- /* Revert changes done by mlx4_alloc_pages */
- page_ref_sub(page, page_alloc[i].page_size /
- priv->frag_info[i].frag_stride - 1);
- put_page(page);
+ for (i = 0; i < priv->num_frags; i++, frags++) {
+ if (!frags->page) {
+ if (mlx4_alloc_page(priv, frags, gfp))
+ return -ENOMEM;
+ ring->rx_alloc_pages++;
}
- }
- return -ENOMEM;
-}
-
-static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frags,
- int i)
-{
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
- u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
-
-
- if (next_frag_end > frags[i].page_size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
- frag_info->dma_dir);
-
- if (frags[i].page)
- put_page(frags[i].page);
-}
-
-static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
-{
- int i;
- struct mlx4_en_rx_alloc *page_alloc;
-
- for (i = 0; i < priv->num_frags; i++) {
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
- if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
- frag_info, GFP_KERNEL | __GFP_COLD))
- goto out;
-
- en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
- i, ring->page_alloc[i].page_size,
- page_ref_count(ring->page_alloc[i].page));
+ rx_desc->data[i].addr = cpu_to_be64(frags->dma +
+ frags->page_offset);
}
return 0;
-
-out:
- while (i--) {
- struct page *page;
-
- page_alloc = &ring->page_alloc[i];
- dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size,
- priv->frag_info[i].dma_dir);
- page = page_alloc->page;
- /* Revert changes done by mlx4_alloc_pages */
- page_ref_sub(page, page_alloc->page_size /
- priv->frag_info[i].frag_stride - 1);
- put_page(page);
- page_alloc->page = NULL;
- }
- return -ENOMEM;
}
-static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frag)
{
- struct mlx4_en_rx_alloc *page_alloc;
- int i;
-
- for (i = 0; i < priv->num_frags; i++) {
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
- page_alloc = &ring->page_alloc[i];
- en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
- i, page_count(page_alloc->page));
-
- dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, frag_info->dma_dir);
- while (page_alloc->page_offset + frag_info->frag_stride <
- page_alloc->page_size) {
- put_page(page_alloc->page);
- page_alloc->page_offset += frag_info->frag_stride;
- }
- page_alloc->page = NULL;
+ if (frag->page) {
+ dma_unmap_page(priv->ddev, frag->dma,
+ PAGE_SIZE, priv->dma_dir);
+ __free_page(frag->page);
}
+ /* We need to clear all fields, otherwise a later change of
+ * priv->log_rx_info could leave stale garbage visible in frag->page.
+ */
+ memset(frag, 0, sizeof(*frag));
}
-static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
@@ -248,18 +137,23 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
-
if (ring->page_cache.index > 0) {
- frags[0] = ring->page_cache.buf[--ring->page_cache.index];
- rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
- frags[0].page_offset);
+ /* XDP uses a single page per frame */
+ if (!frags->page) {
+ ring->page_cache.index--;
+ frags->page = ring->page_cache.buf[ring->page_cache.index].page;
+ frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
+ }
+ frags->page_offset = XDP_PACKET_HEADROOM;
+ rx_desc->data[0].addr = cpu_to_be64(frags->dma +
+ XDP_PACKET_HEADROOM);
return 0;
}
- return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+ return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
-static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
return ring->prod == ring->cons;
}
@@ -269,7 +163,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
-static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+/* slow path */
+static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
@@ -279,7 +174,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags, nr);
+ mlx4_en_free_frag(priv, frags + nr);
}
}
@@ -335,12 +230,12 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
- while (!mlx4_en_is_ring_empty(ring)) {
- index = ring->cons & ring->size_mask;
+ for (index = 0; index < ring->size; index++) {
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
- ++ring->cons;
}
+ ring->cons = 0;
+ ring->prod = 0;
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
@@ -392,9 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vmalloc_node(tmp, node);
+ ring->rx_info = vzalloc_node(tmp, node);
if (!ring->rx_info) {
- ring->rx_info = vmalloc(tmp);
+ ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
goto err_ring;
@@ -464,16 +359,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
-
- /* Initialize page allocators */
- err = mlx4_en_init_allocator(priv, ring);
- if (err) {
- en_err(priv, "Failed initializing ring allocator\n");
- if (ring->stride <= TXBB_SIZE)
- ring->buf -= TXBB_SIZE;
- ring_ind--;
- goto err_allocator;
- }
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -493,11 +378,9 @@ err_buffers:
mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
-err_allocator:
while (ring_ind >= 0) {
if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
@@ -537,7 +420,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
if (cache->index >= MLX4_EN_CACHE_SIZE)
return false;
- cache->buf[cache->index++] = *frame;
+ cache->buf[cache->index].page = frame->page;
+ cache->buf[cache->index].dma = frame->dma;
+ cache->index++;
return true;
}
@@ -567,136 +452,91 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
int i;
for (i = 0; i < ring->page_cache.index; i++) {
- struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
-
- dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
- priv->frag_info[0].dma_dir);
- put_page(frame->page);
+ dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(ring->page_cache.buf[i].page);
}
ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, ring);
}
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
int length)
{
- struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
- struct mlx4_en_frag_info *frag_info;
- int nr;
+ const struct mlx4_en_frag_info *frag_info = priv->frag_info;
+ unsigned int truesize = 0;
+ int nr, frag_size;
+ struct page *page;
dma_addr_t dma;
+ bool release;
/* Collect used fragments while replacing them in the HW descriptors */
- for (nr = 0; nr < priv->num_frags; nr++) {
- frag_info = &priv->frag_info[nr];
- if (length <= frag_info->frag_prefix_size)
- break;
- if (unlikely(!frags[nr].page))
+ for (nr = 0;; frags++) {
+ frag_size = min_t(int, length, frag_info->frag_size);
+
+ page = frags->page;
+ if (unlikely(!page))
goto fail;
- dma = be64_to_cpu(rx_desc->data[nr].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
- DMA_FROM_DEVICE);
+ dma = frags->dma;
+ dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
+ frag_size, priv->dma_dir);
+
+ __skb_fill_page_desc(skb, nr, page, frags->page_offset,
+ frag_size);
- __skb_fill_page_desc(skb, nr, frags[nr].page,
- frags[nr].page_offset,
- frag_info->frag_size);
+ truesize += frag_info->frag_stride;
+ if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ frags->page_offset ^= PAGE_SIZE / 2;
+ release = page_count(page) != 1 ||
+ page_is_pfmemalloc(page) ||
+ page_to_nid(page) != numa_mem_id();
+ } else {
+ u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
- skb->truesize += frag_info->frag_stride;
- frags[nr].page = NULL;
+ frags->page_offset += sz_align;
+ release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
+ }
+ if (release) {
+ dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
+ frags->page = NULL;
+ } else {
+ page_ref_inc(page);
+ }
+
+ nr++;
+ length -= frag_size;
+ if (!length)
+ break;
+ frag_info++;
}
- /* Adjust size of last fragment to match actual length */
- if (nr > 0)
- skb_frag_size_set(&skb_frags_rx[nr - 1],
- length - priv->frag_info[nr - 1].frag_prefix_size);
+ skb->truesize += truesize;
return nr;
fail:
while (nr > 0) {
nr--;
- __skb_frag_unref(&skb_frags_rx[nr]);
+ __skb_frag_unref(skb_shinfo(skb)->frags + nr);
}
return 0;
}
-
-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct mlx4_en_rx_alloc *frags,
- unsigned int length)
-{
- struct sk_buff *skb;
- void *va;
- int used_frags;
- dma_addr_t dma;
-
- skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (unlikely(!skb)) {
- en_dbg(RX_ERR, priv, "Failed allocating skb\n");
- return NULL;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- skb->len = length;
-
- /* Get pointer to first fragment so we could copy the headers into the
- * (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].page_offset;
-
- if (length <= SMALL_PACKET_SIZE) {
- /* We are copying all relevant data to the skb - temporarily
- * sync buffers for the copy */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, length,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, va, length);
- skb->tail += length;
- } else {
- unsigned int pull_len;
-
- /* Move relevant fragments to skb */
- used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
- skb, length);
- if (unlikely(!used_frags)) {
- kfree_skb(skb);
- return NULL;
- }
- skb_shinfo(skb)->nr_frags = used_frags;
-
- pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
- /* Copy headers into the skb linear buffer */
- memcpy(skb->data, va, pull_len);
- skb->tail += pull_len;
-
- /* Skip headers in first fragment */
- skb_shinfo(skb)->frags[0].page_offset += pull_len;
-
- /* Adjust size of first fragment */
- skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
- skb->data_len = length - pull_len;
- }
- return skb;
-}
-
-static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
+ const unsigned char *data = va + ETH_HLEN;
int i;
- int offset = ETH_HLEN;
- for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
- if (*(skb->data + offset) != (unsigned char) (i & 0xff))
- goto out_loopback;
+ for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
+ if (data[i] != (unsigned char)i)
+ return;
}
/* Loopback found */
priv->loopback_ok = 1;
-
-out_loopback:
- dev_kfree_skb_any(skb);
}
static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
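[Editor's note] The page-reuse test introduced in mlx4_en_complete_rx_desc() above keeps a fragment's page for the next descriptor only when the driver still holds the sole reference, the page is not a pfmemalloc emergency page, and it is local to the current NUMA node; otherwise the page is unmapped and ownership passes to the skb. A minimal sketch of that condition for the half-page (frag_stride == PAGE_SIZE / 2) case; the helper name is illustrative, not part of the patch:

	static bool rx_page_reusable(struct page *page)
	{
		/* Reuse only if the stack already released the half it was
		 * given (refcount back to 1), the page is not a pfmemalloc
		 * emergency page, and it sits on the local NUMA node.
		 */
		return page_count(page) == 1 &&
		       !page_is_pfmemalloc(page) &&
		       page_to_nid(page) == numa_mem_id();
	}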
@@ -801,7 +641,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
- struct mlx4_en_rx_desc *rx_desc;
struct bpf_prog *xdp_prog;
int doorbell_pending;
struct sk_buff *skb;
@@ -834,10 +673,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
+ void *va;
frags = ring->rx_info + (index << priv->log_rx_info);
- rx_desc = ring->buf + (index << ring->log_stride);
-
+ va = page_address(frags[0].page) + frags[0].page_offset;
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -860,16 +699,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* and not performing the selftest or flb disabled
*/
if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
- struct ethhdr *ethh;
+ const struct ethhdr *ethh = va;
dma_addr_t dma;
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
@@ -887,13 +724,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
}
+ if (unlikely(priv->validate_loopback)) {
+ validate_loopback(priv, va);
+ goto next;
+ }
+
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
- l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
/* A bpf program gets first chance to drop the packet. It may
* read bytes but not past the end of the frag.
@@ -904,13 +744,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = page_address(frags[0].page);
- xdp.data = xdp.data_hard_start + frags[0].page_offset;
+ xdp.data_hard_start = va - frags[0].page_offset;
+ xdp.data = va;
xdp.data_end = xdp.data + length;
orig_data = xdp.data;
@@ -920,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length = xdp.data_end - xdp.data;
frags[0].page_offset = xdp.data -
xdp.data_hard_start;
+ va = xdp.data;
}
switch (act) {
@@ -928,8 +769,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
length, cq->ring,
- &doorbell_pending)))
- goto consumed;
+ &doorbell_pending))) {
+ frags[0].page = NULL;
+ goto next;
+ }
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
@@ -939,8 +782,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
- if (likely(mlx4_en_rx_recycle(ring, frags)))
- goto consumed;
goto next;
}
}
@@ -948,129 +789,51 @@ xdp_drop_no_cnt:
ring->bytes += length;
ring->packets++;
+ skb = napi_get_frags(&cq->napi);
+ if (!skb)
+ goto next;
+
+ if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ timestamp);
+ }
+ skb_record_rx_queue(skb, cq->ring);
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe->checksum == cpu_to_be16(0xffff)) {
ip_summed = CHECKSUM_UNNECESSARY;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ if (l2_tunnel)
+ skb->csum_level = 1;
ring->csum_ok++;
} else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ goto csum_none;
}
} else {
if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
MLX4_CQE_STATUS_IPV6))) {
- ip_summed = CHECKSUM_COMPLETE;
- ring->csum_complete++;
+ if (check_csum(cqe, skb, va, dev->features)) {
+ goto csum_none;
+ } else {
+ ip_summed = CHECKSUM_COMPLETE;
+ ring->csum_complete++;
+ }
} else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ goto csum_none;
}
}
} else {
+csum_none:
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
}
-
- /* This packet is eligible for GRO if it is:
- * - DIX Ethernet (type interpretation)
- * - TCP/IP (v4)
- * - without IP options
- * - not an IP fragment
- */
- if (dev->features & NETIF_F_GRO) {
- struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
- if (!gro_skb)
- goto next;
-
- nr = mlx4_en_complete_rx_desc(priv,
- rx_desc, frags, gro_skb,
- length);
- if (!nr)
- goto next;
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
- if (check_csum(cqe, gro_skb, va,
- dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
- ring->csum_complete--;
- }
- }
-
- skb_shinfo(gro_skb)->nr_frags = nr;
- gro_skb->len = length;
- gro_skb->data_len = length;
- gro_skb->ip_summed = ip_summed;
-
- if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
- gro_skb->csum_level = 1;
-
- if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- u16 vid = be16_to_cpu(cqe->sl_vid);
-
- __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
- } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_SVLAN_PRESENT_MASK) &&
- (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
- __vlan_hwaccel_put_tag(gro_skb,
- htons(ETH_P_8021AD),
- be16_to_cpu(cqe->sl_vid));
- }
-
- if (dev->features & NETIF_F_RXHASH)
- skb_set_hash(gro_skb,
- be32_to_cpu(cqe->immed_rss_invalid),
- (ip_summed == CHECKSUM_UNNECESSARY) ?
- PKT_HASH_TYPE_L4 :
- PKT_HASH_TYPE_L3);
-
- skb_record_rx_queue(gro_skb, cq->ring);
-
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev,
- skb_hwtstamps(gro_skb),
- timestamp);
- }
-
- napi_gro_frags(&cq->napi);
- goto next;
- }
-
- /* GRO not possible, complete processing here */
- skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (unlikely(!skb)) {
- ring->dropped++;
- goto next;
- }
-
- if (unlikely(priv->validate_loopback)) {
- validate_loopback(priv, skb);
- goto next;
- }
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- if (check_csum(cqe, skb, skb->data, dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_complete--;
- ring->csum_none++;
- }
- }
-
skb->ip_summed = ip_summed;
- skb->protocol = eth_type_trans(skb, dev);
- skb_record_rx_queue(skb, cq->ring);
-
- if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
- skb->csum_level = 1;
-
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
@@ -1078,36 +841,36 @@ xdp_drop_no_cnt:
PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_L3);
- if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_CVLAN_PRESENT_MASK) &&
+
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
- else if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->sl_vid));
+ else if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_STAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
be16_to_cpu(cqe->sl_vid));
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
- timestamp);
+ nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
+ if (likely(nr)) {
+ skb_shinfo(skb)->nr_frags = nr;
+ skb->len = length;
+ skb->data_len = length;
+ napi_gro_frags(&cq->napi);
+ } else {
+ skb->vlan_tci = 0;
+ skb_clear_hash(skb);
}
-
- napi_gro_receive(&cq->napi, skb);
next:
- for (nr = 0; nr < priv->num_frags; nr++)
- mlx4_en_free_frag(priv, frags, nr);
-
-consumed:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
if (++polled == budget)
- goto out;
+ break;
}
-out:
rcu_read_unlock();
if (polled) {
@@ -1178,13 +941,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
return done;
}
-static const int frag_sizes[] = {
- FRAG_SZ0,
- FRAG_SZ1,
- FRAG_SZ2,
- FRAG_SZ3
-};
-
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1195,33 +951,43 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
* This only works when num_frags == 1.
*/
if (priv->tx_ring_num[TX_XDP]) {
- priv->frag_info[0].order = 0;
priv->frag_info[0].frag_size = eff_mtu;
- priv->frag_info[0].frag_prefix_size = 0;
/* This will gain efficient xdp frame recycling at the
* expense of more costly truesize accounting
*/
priv->frag_info[0].frag_stride = PAGE_SIZE;
- priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
- priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
+ priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+ priv->rx_headroom = XDP_PACKET_HEADROOM;
i = 1;
} else {
- int buf_size = 0;
+ int frag_size_max = 2048, buf_size = 0;
+
+ /* Should not happen, but guard against an oversized MTU anyway */
+ if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
+ frag_size_max = PAGE_SIZE;
while (buf_size < eff_mtu) {
- priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER;
- priv->frag_info[i].frag_size =
- (eff_mtu > buf_size + frag_sizes[i]) ?
- frag_sizes[i] : eff_mtu - buf_size;
- priv->frag_info[i].frag_prefix_size = buf_size;
- priv->frag_info[i].frag_stride =
- ALIGN(priv->frag_info[i].frag_size,
- SMP_CACHE_BYTES);
- priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
- priv->frag_info[i].rx_headroom = 0;
- buf_size += priv->frag_info[i].frag_size;
+ int frag_stride, frag_size = eff_mtu - buf_size;
+ int pad, nb;
+
+ if (i < MLX4_EN_MAX_RX_FRAGS - 1)
+ frag_size = min(frag_size, frag_size_max);
+
+ priv->frag_info[i].frag_size = frag_size;
+ frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
+ /* We can only pack two 1536-byte frames in one 4K page,
+ * so each frame consumes more bytes (truesize) than its frag_size.
+ */
+ nb = PAGE_SIZE / frag_stride;
+ pad = (PAGE_SIZE - nb * frag_stride) / nb;
+ pad &= ~(SMP_CACHE_BYTES - 1);
+ priv->frag_info[i].frag_stride = frag_stride + pad;
+
+ buf_size += frag_size;
i++;
}
+ priv->dma_dir = PCI_DMA_FROMDEVICE;
+ priv->rx_headroom = 0;
}
priv->num_frags = i;
@@ -1232,10 +998,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
- " frag:%d - size:%d prefix:%d stride:%d\n",
+ " frag:%d - size:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
- priv->frag_info[i].frag_prefix_size,
priv->frag_info[i].frag_stride);
}
}
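[Editor's note] The stride/padding computation in mlx4_en_calc_rx_buf() above spreads a page's leftover bytes evenly over the fragments that share it, so truesize accounting matches what a page can actually hold. A small sketch of the same arithmetic, assuming PAGE_SIZE = 4096 and SMP_CACHE_BYTES = 64 (illustrative, not driver code):

	static int example_frag_stride(int frag_size)
	{
		int frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
		int nb = PAGE_SIZE / frag_stride;	/* frames per page */
		int pad = (PAGE_SIZE - nb * frag_stride) / nb;

		pad &= ~(SMP_CACHE_BYTES - 1);		/* keep cache-line alignment */
		return frag_stride + pad;
	}

With frag_size = 1522 this gives ALIGN(1522, 64) = 1536, two frames per 4K page, pad = 512, so each fragment is accounted with a stride (truesize) of 2048.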
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 95290e1fc9fe..17112faafbcc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,14 +81,11 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
- bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
- gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
- priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -111,9 +108,6 @@ mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
- if (gro_enabled)
- priv->dev->features |= NETIF_F_GRO;
-
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ed42199d3f1..e0c5ffb3e3a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -354,13 +354,11 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc frame = {
.page = tx_info->page,
.dma = tx_info->map0_dma,
- .page_offset = XDP_PACKET_HEADROOM,
- .page_size = PAGE_SIZE,
};
if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->frag_info[0].dma_dir);
+ PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3629ce11a68b..39f401aa3047 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -102,17 +102,6 @@
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
-#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \
- PAGE_ALLOC_COSTLY_ORDER)
-
-/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
- * and 4K allocations) */
-enum {
- FRAG_SZ0 = 1536 - NET_IP_ALIGN,
- FRAG_SZ1 = 4096,
- FRAG_SZ2 = 4096,
- FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
-};
#define MLX4_EN_MAX_RX_FRAGS 4
/* Maximum ring sizes */
@@ -264,13 +253,16 @@ struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
u32 page_offset;
- u32 page_size;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+
struct mlx4_en_page_cache {
u32 index;
- struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+ struct {
+ struct page *page;
+ dma_addr_t dma;
+ } buf[MLX4_EN_CACHE_SIZE];
};
struct mlx4_en_priv;
@@ -335,7 +327,6 @@ struct mlx4_en_rx_desc {
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
- struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
u32 size ; /* number of Rx descs*/
u32 actual_size;
u32 size_mask;
@@ -355,6 +346,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long rx_alloc_pages;
unsigned long xdp_drop;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
@@ -472,11 +464,7 @@ struct mlx4_en_mc_list {
struct mlx4_en_frag_info {
u16 frag_size;
- u16 frag_prefix_size;
u32 frag_stride;
- enum dma_data_direction dma_dir;
- u16 order;
- u16 rx_headroom;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -584,8 +572,10 @@ struct mlx4_en_priv {
u32 rx_ring_num;
u32 rx_skb_size;
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
- u16 num_frags;
- u16 log_rx_info;
+ u8 num_frags;
+ u8 log_rx_info;
+ u8 dma_dir;
+ u16 rx_headroom;
struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 48641cb0367f..926f3c3f3665 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -37,7 +37,7 @@ struct mlx4_en_port_stats {
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
- unsigned long rx_alloc_failed;
+ unsigned long rx_alloc_pages;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long rx_chksum_complete;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 6b6c30deee83..95fcacf9c8be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o
+ spectrum_acl.o spectrum_flower.o \
+ spectrum_cnt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index a1b48421648a..479511cf79bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_cq_oi
- * When set, overrun ignore is enabled. When set, updates of
- * CQ consumer counter (poll for completion) or Request completion
- * notifications (Arm CQ) DoorBells should not be rung on that CQ.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
-
/* cmd_mbox_sw2hw_cq_st
* Event delivery state machine
* 0x0 - FIRED
@@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_eq_oi
- * When set, overrun ignore is enabled.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
-
/* cmd_mbox_sw2hw_eq_st
* Event delivery state machine
* 0x0 - FIRED
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 5f337715a4da..a984c361926c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}
+/* VLAN Action
+ * -----------
+ * The VLAN action is used for manipulating VLANs. It can be used to implement
+ * QinQ, VLAN translation, changing the PCP bits of the VLAN tag, pushing,
+ * popping or swapping VLANs, and more.
+ */
+
+#define MLXSW_AFA_VLAN_CODE 0x02
+#define MLXSW_AFA_VLAN_SIZE 1
+
+enum mlxsw_afa_vlan_vlan_tag_cmd {
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
+};
+
+enum mlxsw_afa_vlan_cmd {
+ MLXSW_AFA_VLAN_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER,
+ MLXSW_AFA_VLAN_CMD_SET_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
+ MLXSW_AFA_VLAN_CMD_SWAP,
+};
+
+/* afa_vlan_vlan_tag_cmd
+ * Tag command: push, pop, nop VLAN header.
+ */
+MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);
+
+/* afa_vlan_vid_cmd */
+MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);
+
+/* afa_vlan_vid */
+MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);
+
+/* afa_vlan_ethertype_cmd */
+MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);
+
+/* afa_vlan_ethertype
+ * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
+ */
+MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);
+
+/* afa_vlan_pcp_cmd */
+MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);
+
+/* afa_vlan_pcp */
+MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
+
+static inline void
+mlxsw_afa_vlan_pack(char *payload,
+ enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
+ enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
+ enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
+ enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
+{
+ mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
+ mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
+ mlxsw_afa_vlan_vid_set(payload, vid);
+ mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
+ mlxsw_afa_vlan_pcp_set(payload, pcp);
+ mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
+ mlxsw_afa_vlan_ethertype_set(payload, ethertype);
+}
+
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VLAN_CODE,
+ MLXSW_AFA_VLAN_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
+
/* Trap / Discard Action
* ---------------------
* The Trap / Discard action enables trapping / mirroring packets to the CPU
@@ -677,3 +760,54 @@ err_append_action:
return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
+
+/* Policing and Counting Action
+ * ----------------------------
+ * The Policing and Counting action is used to bind a policer and a counter
+ * to an ACL rule.
+ */
+
+#define MLXSW_AFA_POLCNT_CODE 0x08
+#define MLXSW_AFA_POLCNT_SIZE 1
+
+enum mlxsw_afa_polcnt_counter_set_type {
+ /* No count */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* afa_polcnt_counter_set_type
+ * Counter set type for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
+
+/* afa_polcnt_counter_index
+ * Counter index for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
+
+static inline void
+mlxsw_afa_polcnt_pack(char *payload,
+ enum mlxsw_afa_polcnt_counter_set_type set_type,
+ u32 counter_index)
+{
+ mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
+ mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
+}
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_POLCNT_CODE,
+ MLXSW_AFA_POLCNT_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
+ counter_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
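[Editor's note] The two new flexible actions above are meant to be appended to an action block built by the spectrum driver. A hedged usage sketch: the block and counter_index are assumed to come from the usual mlxsw_afa_create()/counter-pool paths, and VID 100, PCP 3 and EtherType index 0 (assumed to select the default 802.1Q entry) are illustrative values only:

	static int example_append_vlan_and_counter(struct mlxsw_afa_block *block,
						   u32 counter_index)
	{
		int err;

		/* Rewrite the outer VLAN tag: VID 100, PCP 3, EtherType index 0 */
		err = mlxsw_afa_block_append_vlan_modify(block, 100, 3, 0);
		if (err)
			return err;

		/* Bind a packets+bytes flow counter to the same rule */
		return mlxsw_afa_block_append_counter(block, counter_index);
	}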
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43f78dcfe394..a03362c1ef32 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -62,5 +62,9 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index e4fcba7c2af2..c75e9141e3ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -54,6 +54,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_DST_IP6_LO,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -88,7 +90,7 @@ struct mlxsw_afk_element_info {
MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
_element, _offset, 0, _size)
-/* For the purpose of the driver, define a internal storage scratchpad
+/* For the purpose of the driver, define an internal storage scratchpad
* that will be used to store key/mask values. For each defined element type
* define an internal storage geometry.
*/
@@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+ MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a223c85dfde0..ffeb746fe2f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -580,7 +580,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
- mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -755,7 +754,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
- mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index d9616daf8a70..e7a652c43b5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4141,7 +4141,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
enum mlxsw_reg_ritr_if_type type,
- u16 rif, u16 mtu, const char *mac)
+ u16 rif, u16 vr_id, u16 mtu,
+ const char *mac)
{
bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
@@ -4153,6 +4154,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
@@ -5504,6 +5506,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
mlxsw_reg_mpsc_rate_set(payload, rate);
}
+/* MGPC - Monitoring General Purpose Counter Set Register
+ * The MGPC register retrieves and sets the General Purpose Counter Set.
+ */
+#define MLXSW_REG_MGPC_ID 0x9081
+#define MLXSW_REG_MGPC_LEN 0x18
+
+MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
+
+enum mlxsw_reg_mgpc_counter_set_type {
+ /* No count */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_mgpc_counter_set_type
+ * Counter set type.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
+
+/* reg_mgpc_counter_index
+ * Counter index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
+
+enum mlxsw_reg_mgpc_opcode {
+ /* Nop */
+ MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
+ /* Clear counters */
+ MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_mgpc_opcode
+ * Opcode.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
+
+/* reg_mgpc_byte_counter
+ * Byte counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
+
+/* reg_mgpc_packet_counter
+ * Packet counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
+
+static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
+ enum mlxsw_reg_mgpc_opcode opcode,
+ enum mlxsw_reg_mgpc_counter_set_type set_type)
+{
+ MLXSW_REG_ZERO(mgpc, payload);
+ mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
+ mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
+ mlxsw_reg_mgpc_opcode_set(payload, opcode);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5977,6 +6043,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
+ MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index bce8c2e00630..905a8e269f87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,7 +43,9 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_COUNTER_POOL_SIZE,
MLXSW_RES_ID_MAX_SPAN,
+ MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
@@ -75,7 +77,9 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
+ [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16484f24b7db..475499b6c989 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -66,6 +66,7 @@
#include "port.h"
#include "trap.h"
#include "txheader.h"
+#include "spectrum_cnt.h"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
@@ -138,6 +139,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+ int err;
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+ if (err)
+ return err;
+ *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
+ *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
+ return 0;
+}
+
+static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+}
+
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index)
+{
+ int err;
+
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ p_counter_index);
+ if (err)
+ return err;
+ err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+ return 0;
+
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ counter_index);
+}
+
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
@@ -1379,6 +1434,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
tc->cls_flower);
return 0;
+ case TC_CLSFLOWER_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
+ tc->cls_flower);
default:
return -EOPNOTSUPP;
}
@@ -3224,6 +3282,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
+ err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+ goto err_counter_pool_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3233,6 +3297,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
@@ -3255,6 +3321,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
@@ -3326,13 +3393,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
-static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
+static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
- struct mlxsw_sp_port **port = data;
+ struct mlxsw_sp_port **p_mlxsw_sp_port = data;
int ret = 0;
if (mlxsw_sp_port_dev_check(lower_dev)) {
- *port = netdev_priv(lower_dev);
+ *p_mlxsw_sp_port = netdev_priv(lower_dev);
ret = 1;
}
@@ -3341,18 +3408,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
- struct mlxsw_sp_port *port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- port = NULL;
- netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);
+ mlxsw_sp_port = NULL;
+ netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
- return port;
+ return mlxsw_sp_port;
}
-static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3362,15 +3429,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
- struct mlxsw_sp_port *port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- port = NULL;
- netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);
+ mlxsw_sp_port = NULL;
+ netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
+ &mlxsw_sp_port);
- return port;
+ return mlxsw_sp_port;
}
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
@@ -3390,546 +3458,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
-static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
- unsigned long event)
-{
- switch (event) {
- case NETDEV_UP:
- if (!r)
- return true;
- r->ref_count++;
- return false;
- case NETDEV_DOWN:
- if (r && --r->ref_count == 0)
- return true;
- /* It is possible we already removed the RIF ourselves
- * if it was assigned to a netdev that is now a bridge
- * or LAG slave.
- */
- return false;
- }
-
- return false;
-}
-
-static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (!mlxsw_sp->rifs[i])
- return i;
-
- return MLXSW_SP_INVALID_RIF;
-}
-
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
- bool *p_lagged, u16 *p_system_port)
-{
- u8 local_port = mlxsw_sp_vport->local_port;
-
- *p_lagged = mlxsw_sp_vport->lagged;
- *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev, u16 rif,
- bool create)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- bool lagged = mlxsw_sp_vport->lagged;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u16 system_port;
-
- mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
- l3_dev->mtu, l3_dev->dev_addr);
-
- mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
- mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
- mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
-{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->leave = mlxsw_sp_vport_rif_sp_leave;
- f->ref_count = 0;
- f->dev = l3_dev;
- f->fid = fid;
-
- return f;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
-{
- struct mlxsw_sp_rif *r;
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (!r)
- return NULL;
-
- INIT_LIST_HEAD(&r->nexthop_list);
- INIT_LIST_HEAD(&r->neigh_list);
- ether_addr_copy(r->addr, l3_dev->dev_addr);
- r->mtu = l3_dev->mtu;
- r->ref_count = 1;
- r->dev = l3_dev;
- r->rif = rif;
- r->f = f;
-
- return r;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_fid *f;
- struct mlxsw_sp_rif *r;
- u16 fid, rif;
- int err;
-
- rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_INVALID_RIF)
- return ERR_PTR(-ERANGE);
-
- err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
- if (err)
- return ERR_PTR(err);
-
- fid = mlxsw_sp_rif_sp_to_fid(rif);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- f = mlxsw_sp_rfid_alloc(fid, l3_dev);
- if (!f) {
- err = -ENOMEM;
- goto err_rfid_alloc;
- }
-
- r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
- if (!r) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->r = r;
- mlxsw_sp->rifs[rif] = r;
-
- return r;
-
-err_rif_alloc:
- kfree(f);
-err_rfid_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-err_rif_fdb_op:
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_rif *r)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct net_device *l3_dev = r->dev;
- struct mlxsw_sp_fid *f = r->f;
- u16 fid = f->fid;
- u16 rif = r->rif;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
- mlxsw_sp->rifs[rif] = NULL;
- f->r = NULL;
-
- kfree(r);
-
- kfree(f);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
-}
-
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_rif *r;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
- if (!r) {
- r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
- if (IS_ERR(r))
- return PTR_ERR(r);
- }
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
- r->f->ref_count++;
-
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
-
- return 0;
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
-}
-
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
- struct net_device *port_dev,
- unsigned long event, u16 vid)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
-
- switch (event) {
- case NETDEV_UP:
- return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
- case NETDEV_DOWN:
- mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
- break;
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event)
-{
- if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
- return 0;
-
- return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
-}
-
-static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
- struct net_device *lag_dev,
- unsigned long event, u16 vid)
-{
- struct net_device *port_dev;
- struct list_head *iter;
- int err;
-
- netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
- if (mlxsw_sp_port_dev_check(port_dev)) {
- err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
- event, vid);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event)
-{
- if (netif_is_bridge_port(lag_dev))
- return 0;
-
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
-{
- u16 fid;
-
- if (is_vlan_dev(l3_dev))
- fid = vlan_dev_vlan_id(l3_dev);
- else if (mlxsw_sp->master_bridge.dev == l3_dev)
- fid = 1;
- else
- return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
- return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
- bool set)
-{
- enum mlxsw_flood_table_type table_type;
- char *sftr_pl;
- u16 index;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- table_type = mlxsw_sp_flood_table_type_get(fid);
- index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
- 1, MLXSW_PORT_ROUTER_PORT, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return MLXSW_REG_RITR_FID_IF;
- else
- return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- u16 fid, u16 rif,
- bool create)
-{
- enum mlxsw_reg_ritr_if_type rif_type;
- char ritr_pl[MLXSW_REG_RITR_LEN];
-
- rif_type = mlxsw_sp_rif_type_get(fid);
- mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
- l3_dev->dev_addr);
- mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
-{
- struct mlxsw_sp_rif *r;
- u16 rif;
- int err;
-
- rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_INVALID_RIF)
- return -ERANGE;
-
- err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
- if (err)
- return err;
-
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
- if (err)
- goto err_rif_bridge_op;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
- if (!r) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->r = r;
- mlxsw_sp->rifs[rif] = r;
-
- netdev_dbg(l3_dev, "RIF=%d created\n", rif);
-
- return 0;
-
-err_rif_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
- mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-err_rif_bridge_op:
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
- return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
-{
- struct net_device *l3_dev = r->dev;
- struct mlxsw_sp_fid *f = r->f;
- u16 rif = r->rif;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
- mlxsw_sp->rifs[rif] = NULL;
- f->r = NULL;
-
- kfree(r);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
- mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
- netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
-}
-
-static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- struct net_device *br_dev,
- unsigned long event)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
- struct mlxsw_sp_fid *f;
-
- /* FID can either be an actual FID if the L3 device is the
- * VLAN-aware bridge or a VLAN device on top. Otherwise, the
- * L3 device is a VLAN-unaware bridge and we get a vFID.
- */
- f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
- if (WARN_ON(!f))
- return -EINVAL;
-
- switch (event) {
- case NETDEV_UP:
- return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
- case NETDEV_DOWN:
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- break;
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
- unsigned long event)
-{
- struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
- vid);
- else if (netif_is_lag_master(real_dev))
- return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
- vid);
- else if (netif_is_bridge_master(real_dev) &&
- mlxsw_sp->master_bridge.dev == real_dev)
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
- event);
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
- struct net_device *dev = ifa->ifa_dev->dev;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *r;
- int err = 0;
-
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!mlxsw_sp_rif_should_config(r, event))
- goto out;
-
- if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_inetaddr_port_event(dev, event);
- else if (netif_is_lag_master(dev))
- err = mlxsw_sp_inetaddr_lag_event(dev, event);
- else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
- else if (is_vlan_dev(dev))
- err = mlxsw_sp_inetaddr_vlan_event(dev, event);
-
-out:
- return notifier_from_errno(err);
-}
-
-static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
- const char *mac, int mtu)
-{
- char ritr_pl[MLXSW_REG_RITR_LEN];
- int err;
-
- mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (err)
- return err;
-
- mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
- mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
- mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
-{
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *r;
- int err;
-
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- return 0;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!r)
- return 0;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
- if (err)
- return err;
-
- err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
- if (err)
- goto err_rif_edit;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- ether_addr_copy(r->addr, dev->dev_addr);
- r->mtu = dev->mtu;
-
- netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
-
- return 0;
-
-err_rif_fdb_op:
- mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
-err_rif_edit:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
- return err;
-}
-
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
u16 fid)
{
@@ -4220,7 +3748,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 lag_id)
+ struct net_device *lag_dev, u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
@@ -4238,6 +3766,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->lag_id = lag_id;
mlxsw_sp_vport->lagged = 1;
+ mlxsw_sp_vport->dev = lag_dev;
}
static void
@@ -4254,6 +3783,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
if (f)
f->leave(mlxsw_sp_vport);
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
mlxsw_sp_vport->lagged = 0;
}
@@ -4293,7 +3823,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
return 0;
@@ -4564,33 +4094,40 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
struct netdev_notifier_changeupper_info *info;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(br_dev);
if (!mlxsw_sp)
return 0;
- if (br_dev != mlxsw_sp->master_bridge.dev)
- return 0;
info = ptr;
switch (event) {
- case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev))
- break;
- if (info->linking) {
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- if (err)
- return err;
+ return -EINVAL;
+ if (is_vlan_dev(upper_dev) &&
+ br_dev != mlxsw_sp->master_bridge.dev)
+ return -EINVAL;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (is_vlan_dev(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ else
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
+ upper_dev);
} else {
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return 0;
+ return err;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
@@ -4810,6 +4347,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport)
+ return 0;
switch (event) {
case NETDEV_PRECHANGEUPPER:
@@ -4827,16 +4366,17 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (info->linking) {
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+ upper_dev);
+ else
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
} else {
- if (!mlxsw_sp_vport)
- return 0;
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ err = -EINVAL;
+ WARN_ON(1);
}
+ break;
}
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 13ec85e7c392..5502232b06cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -58,7 +58,6 @@
#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -92,6 +91,7 @@ static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
}
struct mlxsw_sp_port;
+struct mlxsw_sp_rif;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -107,17 +107,6 @@ struct mlxsw_sp_fid {
u16 fid;
};
-struct mlxsw_sp_rif {
- struct list_head nexthop_list;
- struct list_head neigh_list;
- struct net_device *dev;
- unsigned int ref_count;
- struct mlxsw_sp_fid *f;
- unsigned char addr[ETH_ALEN];
- int mtu;
- u16 rif;
-};
-
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -141,16 +130,6 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}
-static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
-{
- return fid >= MLXSW_SP_RFID_BASE;
-}
-
-static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
-{
- return MLXSW_SP_RFID_BASE + rif;
-}
-
struct mlxsw_sp_sb_pr {
enum mlxsw_reg_sbpr_mode mode;
u32 size;
@@ -207,11 +186,9 @@ struct mlxsw_sp_fib;
struct mlxsw_sp_vr {
u16 id; /* virtual router ID */
- bool used;
- enum mlxsw_sp_l3proto proto;
u32 tb_id; /* kernel fib table id */
- struct mlxsw_sp_lpm_tree *lpm_tree;
- struct mlxsw_sp_fib *fib;
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
};
enum mlxsw_sp_span_type {
@@ -269,6 +246,7 @@ struct mlxsw_sp_router {
};
struct mlxsw_sp_acl;
+struct mlxsw_sp_counter_pool;
struct mlxsw_sp {
struct {
@@ -304,6 +282,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
+ struct mlxsw_sp_counter_pool *counter_pool;
struct {
struct mlxsw_sp_span_entry *entries;
int entries_count;
@@ -386,6 +365,7 @@ struct mlxsw_sp_port {
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -497,19 +477,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-static inline struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
- return mlxsw_sp->rifs[i];
-
- return NULL;
-}
-
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BC,
@@ -570,8 +537,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -608,8 +573,11 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
@@ -620,6 +588,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
+ unsigned int counter_index;
+ bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -639,6 +609,8 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
};
struct mlxsw_sp_acl_ops {
@@ -679,6 +651,11 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio);
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei);
struct mlxsw_sp_acl_rule;
@@ -698,6 +675,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use);
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -708,5 +688,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
__be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 8a18b3aa70dc..4d6920d45026 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/tc_act/tc_vlan.h>
#include "reg.h"
#include "core.h"
@@ -49,10 +50,17 @@
#include "spectrum_acl_flex_keys.h"
struct mlxsw_sp_acl {
+ struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
+ struct list_head rules;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
+ } rule_activity_update;
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_rule {
struct rhash_head ht_node; /* Member of rule HT */
+ struct list_head list;
unsigned long cookie; /* HT key */
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule_info *rulei;
+ u64 last_used;
+ u64 last_packets;
+ u64 last_bytes;
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
+static int
+mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ rulei->counter_valid = false;
+ mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -335,6 +368,41 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio)
+{
+ u8 ethertype;
+
+ if (action == TCA_VLAN_ACT_MODIFY) {
+ switch (proto) {
+ case ETH_P_8021Q:
+ ethertype = 0;
+ break;
+ case ETH_P_8021AD:
+ ethertype = 1;
+ break;
+ default:
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
+ proto);
+ return -EINVAL;
+ }
+
+ return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
+ vid, prio, ethertype);
+ } else {
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
+ return -EINVAL;
+ }
+}
+
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_counter(rulei->act_block,
+ rulei->counter_index);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
@@ -358,8 +426,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(rule->rulei);
goto err_rulei_create;
}
+
+ err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
+ if (err)
+ goto err_counter_alloc;
return rule;
+err_counter_alloc:
+ mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -372,6 +446,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -393,6 +468,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
return 0;
err_rhashtable_insert:
@@ -406,6 +482,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
@@ -426,6 +503,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
return rule->rulei;
}
+static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ bool active;
+ int err;
+
+ err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
+ if (err)
+ return err;
+ if (active)
+ rule->last_used = jiffies;
+ return 0;
+}
+
+static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
+{
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ list_for_each_entry(rule, &acl->rules, list) {
+ err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
+ rule);
+ if (err)
+ goto err_rule_update;
+ }
+ rtnl_unlock();
+ return 0;
+
+err_rule_update:
+ rtnl_unlock();
+ return err;
+}
+
+static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
+{
+ unsigned long interval = acl->rule_activity_update.interval;
+
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
+ rule_activity_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_acl_rules_activity_update(acl);
+ if (err)
+ dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");
+
+ mlxsw_sp_acl_rule_activity_work_schedule(acl);
+}
+
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
+
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets, &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets - rule->last_packets;
+ *bytes = current_bytes - rule->last_bytes;
+ *last_use = rule->last_used;
+
+ rule->last_bytes = current_bytes;
+ rule->last_packets = current_packets;
+
+ return 0;
+}
+
#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
@@ -518,7 +679,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
-
+ acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
mlxsw_sp_afk_blocks,
@@ -541,11 +702,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
goto err_acl_ops_init;
acl->ops = acl_ops;
+
+ /* Create the delayed work for the rule activity update */
+ INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
+ mlxsw_sp_acl_rule_activity_update_work);
+ acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
return 0;
err_acl_ops_init:
@@ -564,7 +732,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
+ cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
+ WARN_ON(!list_empty(&acl->rules));
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
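
The rule activity handling added above follows a simple polling pattern: a delayed work item periodically asks the hardware whether each rule's entry was hit since the last query (clear-on-read) and, if so, refreshes a last_used timestamp that later feeds the flower stats. A stripped-down user-space model of one polling pass (the rule list, the activity source and the timestamp are illustrative placeholders, not the driver's structures):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct rule {
	const char *name;
	time_t last_used;
};

/* Stand-in for the clear-on-read hardware activity query (PTCE2 in the driver). */
static bool rule_activity_get(const struct rule *rule)
{
	return rule->name[0] == 'a';	/* pretend only "a..." rules saw traffic */
}

/* One pass of the periodic update: refresh last_used for every active rule. */
static void rules_activity_update(struct rule *rules, int count)
{
	time_t now = time(NULL);
	int i;

	for (i = 0; i < count; i++)
		if (rule_activity_get(&rules[i]))
			rules[i].last_used = now;	/* plays the role of jiffies */
}

int main(void)
{
	struct rule rules[] = { { "arp-drop", 0 }, { "web-fwd", 0 } };

	rules_activity_update(rules, 2);
	printf("%s last used at %ld\n", rules[0].name, (long)rules[0].last_used);
	return 0;
}

In the driver the same pass runs from a delayed work item that reschedules itself every MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS under rtnl_lock.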
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index 82b81cf7f4a7..af7b7bad48df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -39,11 +39,15 @@
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
@@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
};
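
The new VID and PCP flex-key elements above (12 bits at offset 0, 3 bits at offset 13 within the key word) mirror the standard 802.1Q TCI layout. A small standalone sketch of pulling both fields out of a TCI value (the sample TCI is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tci = 0xa064;			/* example TCI: PCP 5, DEI 0, VID 100 */
	uint16_t vid = tci & 0x0fff;		/* VID: bits 11..0 (12 bits) */
	uint8_t pcp = (tci >> 13) & 0x7;	/* PCP: bits 15..13 (3 bits) */

	printf("vid=%u pcp=%u\n", vid, pcp);
	return 0;
}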
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7382832215fa..3a24289979d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
+static int
+mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ region->tcam_region_info, offset);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
static int
@@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
+static int
+mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
+ entry->parman_item.index,
+ activity);
+}
+
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC,
@@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_DST_IP4,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
@@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}
+static int
+mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
+ activity);
+}
+
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
@@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+ .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
static const struct mlxsw_sp_acl_profile_ops *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
new file mode 100644
index 000000000000..1631e01908c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -0,0 +1,198 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum_cnt.h"
+
+#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
+
+struct mlxsw_sp_counter_sub_pool {
+ unsigned int base_index;
+ unsigned int size;
+ unsigned int entry_size;
+ unsigned int bank_count;
+};
+
+struct mlxsw_sp_counter_pool {
+ unsigned int pool_size;
+ unsigned long *usage; /* Usage bitmap */
+ struct mlxsw_sp_counter_sub_pool *sub_pools;
+};
+
+static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+ [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+ .bank_count = 6,
+ },
+};
+
+static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int total_bank_config = 0;
+ unsigned int pool_size;
+ int i;
+
+ pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+ /* Check the config is valid: no bank oversubscription */
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
+ total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+ if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
+ return -EINVAL;
+ return 0;
+}
+
+static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+
+ /* Prepare generic flow pool */
+ sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
+ return -EIO;
+ sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ COUNTER_SIZE_PACKETS_BYTES);
+ return 0;
+}
+
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ struct mlxsw_sp_counter_pool *pool;
+ unsigned int base_index;
+ unsigned int map_size;
+ int i;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
+ return -EIO;
+
+ err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
+ if (err)
+ return err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+ map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
+
+ pool->usage = kzalloc(map_size, GFP_KERNEL);
+ if (!pool->usage) {
+ err = -ENOMEM;
+ goto err_usage_alloc;
+ }
+
+ pool->sub_pools = mlxsw_sp_counter_sub_pools;
+ /* Allocation is based on the bank count, which should be
+ * specified statically for each sub-pool.
+ */
+ base_index = 0;
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ sub_pool = &pool->sub_pools[i];
+ sub_pool->size = sub_pool->bank_count *
+ MLXSW_SP_COUNTER_POOL_BANK_SIZE;
+ sub_pool->base_index = base_index;
+ base_index += sub_pool->size;
+ /* The last bank can't be fully used */
+ if (sub_pool->base_index + sub_pool->size > pool->pool_size)
+ sub_pool->size = pool->pool_size - sub_pool->base_index;
+ }
+
+ mlxsw_sp->counter_pool = pool;
+ return 0;
+
+err_usage_alloc:
+ kfree(pool);
+ return err;
+}
+
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+
+ WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+ pool->pool_size);
+ kfree(pool->usage);
+ kfree(pool);
+}
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int entry_index;
+ unsigned int stop_index;
+ int i;
+
+ sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ stop_index = sub_pool->base_index + sub_pool->size;
+ entry_index = sub_pool->base_index;
+
+ entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
+ if (entry_index == stop_index)
+ return -ENOBUFS;
+ /* The sub-pool size may not be a whole multiple of the entry size,
+ * so we must check for overflow
+ */
+ if (entry_index + sub_pool->entry_size > stop_index)
+ return -ENOBUFS;
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __set_bit(entry_index + i, pool->usage);
+
+ *p_counter_index = entry_index;
+ return 0;
+}
+
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
+
+ if (WARN_ON(counter_index >= pool->pool_size))
+ return;
+ sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __clear_bit(counter_index + i, pool->usage);
+}
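
The allocator introduced above carves a single usage bitmap into statically sized sub-pools and hands out entry_size consecutive bits per counter. A minimal user-space sketch of the same bookkeeping; the pool size, entry size and helpers here are simplified stand-ins, not the driver's code:

#include <stdio.h>
#include <string.h>

#define POOL_SIZE  64	/* illustrative; the driver reads COUNTER_POOL_SIZE */
#define ENTRY_SIZE 2	/* e.g. a packets+bytes counter pair */

static unsigned char usage[POOL_SIZE];	/* one byte per slot instead of a bitmap */

/* Grab ENTRY_SIZE consecutive free slots in [base, stop); return -1 if full. */
static int counter_alloc(unsigned int base, unsigned int stop,
			 unsigned int *p_index)
{
	unsigned int i = base, j;

	while (i < stop && usage[i])
		i++;				/* find_next_zero_bit() equivalent */
	if (i == stop || i + ENTRY_SIZE > stop)
		return -1;			/* -ENOBUFS in the driver */
	for (j = 0; j < ENTRY_SIZE; j++)
		usage[i + j] = 1;		/* __set_bit() equivalent */
	*p_index = i;
	return 0;
}

static void counter_free(unsigned int index)
{
	memset(&usage[index], 0, ENTRY_SIZE);	/* __clear_bit() equivalent */
}

int main(void)
{
	unsigned int idx;

	if (counter_alloc(0, POOL_SIZE, &idx))
		return 1;
	printf("allocated counter at index %u\n", idx);
	counter_free(idx);
	return 0;
}

As in the driver, correctness relies on every allocation and free covering exactly entry_size slots, so allocated regions never interleave.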
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644
index 000000000000..031bc4abbe2d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -0,0 +1,53 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkdis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_CNT_H
+#define _MLXSW_SPECTRUM_CNT_H
+
+#include "spectrum.h"
+
+enum mlxsw_sp_counter_sub_pool_id {
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+};
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index);
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index);
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ae6cccc666e4..e724c6266247 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -39,6 +39,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "spectrum.h"
#include "core_acl_flex_keys.h"
@@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (tc_no_actions(exts))
return 0;
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+ if (err)
+ return err;
+
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
@@ -73,6 +79,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
out_dev);
if (err)
return err;
+ } else if (is_tcf_vlan(a)) {
+ u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
+ u32 action = tcf_vlan_action(a);
+ u8 prio = tcf_vlan_push_prio(a);
+ u16 vid = tcf_vlan_push_vid(a);
+
+ return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ action, vid,
+ proto, prio);
} else {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
@@ -173,7 +188,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
return -EOPNOTSUPP;
}
@@ -234,6 +250,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
sizeof(key->src));
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VID,
+ key->vlan_id,
+ mask->vlan_id);
+ if (mask->vlan_priority != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_PCP,
+ key->vlan_priority,
+ mask->vlan_priority);
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
mlxsw_sp_flower_parse_ipv4(rulei, f);
@@ -314,3 +351,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ int err;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
+ ingress,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (WARN_ON(IS_ERR(ruleset)))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+ if (!rule)
+ return -EINVAL;
+
+ err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &bytes, &packets,
+ &lastuse);
+ if (err)
+ goto err_rule_get_stats;
+
+ preempt_disable();
+
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+
+ preempt_enable();
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return 0;
+
+err_rule_get_stats:
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return err;
+}
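
mlxsw_sp_acl_rule_get_stats above reports deltas: the hardware counter is cumulative, so the rule caches the last reported packet and byte values and returns only the difference on each flower stats query. A minimal model of that delta bookkeeping (field and function names here are illustrative):

#include <stdint.h>
#include <stdio.h>

struct rule_stats_cache {
	uint64_t last_packets;
	uint64_t last_bytes;
};

/* Return the increments since the previous call and refresh the cache. */
static void stats_delta(struct rule_stats_cache *cache,
			uint64_t cur_packets, uint64_t cur_bytes,
			uint64_t *d_packets, uint64_t *d_bytes)
{
	*d_packets = cur_packets - cache->last_packets;
	*d_bytes = cur_bytes - cache->last_bytes;
	cache->last_packets = cur_packets;
	cache->last_bytes = cur_bytes;
}

int main(void)
{
	struct rule_stats_cache cache = { 0, 0 };
	uint64_t dp, db;

	stats_delta(&cache, 10, 1500, &dp, &db);	/* first read: 10 pkts */
	stats_delta(&cache, 25, 3800, &dp, &db);	/* second read: 15 pkts */
	printf("delta: %llu packets, %llu bytes\n",
	       (unsigned long long)dp, (unsigned long long)db);
	return 0;
}

The deltas are then pushed into the tc actions via tcf_action_stats_update(), as done in mlxsw_sp_flower_stats() above.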
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index bd8de6b9be71..80345a1ddf17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -50,6 +50,21 @@
#include "core.h"
#include "reg.h"
+struct mlxsw_sp_rif {
+ struct list_head nexthop_list;
+ struct list_head neigh_list;
+ struct net_device *dev;
+ struct mlxsw_sp_fid *f;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif;
+ u16 vr_id;
+};
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -89,12 +104,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
}
static void
-mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- memset(prefix_usage, 0, sizeof(*prefix_usage));
-}
-
-static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
unsigned char prefix_len)
{
@@ -125,7 +134,7 @@ struct mlxsw_sp_fib_node {
struct list_head entry_list;
struct list_head list;
struct rhash_head ht_node;
- struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_fib_key key;
};
@@ -149,13 +158,17 @@ struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
+ enum mlxsw_sp_l3proto proto;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_fib *fib;
int err;
@@ -167,6 +180,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
if (err)
goto err_rhashtable_init;
INIT_LIST_HEAD(&fib->node_list);
+ fib->proto = proto;
+ fib->vr = vr;
return fib;
err_rhashtable_init:
@@ -177,24 +192,21 @@ err_rhashtable_init:
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
WARN_ON(!list_empty(&fib->node_list));
+ WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
-mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->ref_count == 0) {
- if (one_reserved)
- one_reserved = false;
- else
- return lpm_tree;
- }
+ if (lpm_tree->ref_count == 0)
+ return lpm_tree;
}
return NULL;
}
@@ -248,12 +260,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
@@ -283,7 +295,7 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -297,7 +309,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
goto inc_ref_count;
}
lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
- proto, one_reserved);
+ proto);
if (IS_ERR(lpm_tree))
return lpm_tree;
@@ -325,6 +337,11 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
}
}
+static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
+{
+ return !!vr->fib4;
+}
+
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_vr *vr;
@@ -332,31 +349,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
return NULL;
}
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto,
- vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -369,8 +386,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
struct mlxsw_sp_vr *vr;
int i;
@@ -379,69 +395,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
return NULL;
}
+static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return vr->fib4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage;
- struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_vr *vr;
- int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
- vr->fib = mlxsw_sp_fib_create();
- if (IS_ERR(vr->fib))
- return ERR_CAST(vr->fib);
-
- vr->proto = proto;
+ vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr->fib4))
+ return ERR_CAST(vr->fib4);
vr->tb_id = tb_id;
- mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- proto, true);
- if (IS_ERR(lpm_tree)) {
- err = PTR_ERR(lpm_tree);
- goto err_tree_get;
- }
- vr->lpm_tree = lpm_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
- if (err)
- goto err_tree_bind;
-
- vr->used = true;
return vr;
-
-err_tree_bind:
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
-err_tree_get:
- mlxsw_sp_fib_destroy(vr->fib);
-
- return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
- mlxsw_sp_fib_destroy(vr->fib);
- vr->used = false;
+ mlxsw_sp_fib_destroy(vr->fib4);
+ vr->fib4 = NULL;
}
static int
-mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
- struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
struct mlxsw_sp_lpm_tree *new_tree;
int err;
@@ -449,7 +446,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
- vr->proto, false);
+ fib->proto);
if (IS_ERR(new_tree)) {
/* We failed to get a tree according to the required
* prefix usage. However, the current tree might be still good
@@ -463,8 +460,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
}
/* Prevent packet loss by overwriting existing binding */
- vr->lpm_tree = new_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ fib->lpm_tree = new_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
if (err)
goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +469,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
err_tree_bind:
- vr->lpm_tree = lpm_tree;
+ fib->lpm_tree = lpm_tree;
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
return err;
}
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
struct mlxsw_sp_vr *vr;
- int err;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
- if (!vr) {
- vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
- if (IS_ERR(vr))
- return vr;
- } else {
- struct mlxsw_sp_prefix_usage req_prefix_usage;
-
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &vr->fib->prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- /* Need to replace LPM tree in case new prefix is required. */
- err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &req_prefix_usage);
- if (err)
- return ERR_PTR(err);
- }
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
- /* Destroy virtual router entity in case the associated FIB is empty
- * and allow it to be used for other tables in future. Otherwise,
- * check if some prefix usage did not disappear and change tree if
- * that is the case. Note that in case new, smaller tree cannot be
- * allocated, the original one will be kept being used.
- */
- if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
- mlxsw_sp_vr_destroy(mlxsw_sp, vr);
- else
- mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &vr->fib->prefix_usage);
+ if (!vr->rif_count && list_empty(&vr->fib4->node_list))
+ mlxsw_sp_vr_destroy(vr);
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -1171,7 +1141,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr,
+ const struct mlxsw_sp_fib *fib,
u32 adj_index, u16 ecmp_size,
u32 new_adj_index,
u16 new_ecmp_size)
@@ -1179,8 +1149,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
char raleu_pl[MLXSW_REG_RALEU_LEN];
mlxsw_reg_raleu_pack(raleu_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
- adj_index, ecmp_size, new_adj_index,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->vr->id, adj_index, ecmp_size, new_adj_index,
new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1190,14 +1160,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
u32 old_adj_index, u16 old_ecmp_size)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr = NULL;
+ struct mlxsw_sp_fib *fib = NULL;
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (vr == fib_entry->fib_node->vr)
+ if (fib == fib_entry->fib_node->fib)
continue;
- vr = fib_entry->fib_node->vr;
- err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ fib = fib_entry->fib_node->fib;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
old_adj_index,
old_ecmp_size,
nh_grp->adj_index,
@@ -1514,6 +1484,9 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ if (!dev)
+ return 0;
+
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
fib_nh->nh_flags & RTNH_F_LINKDOWN)
@@ -1699,7 +1672,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
fib_entry->offloaded = true;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_inc(fib_entry->nh_group->key.fi);
break;
@@ -1711,7 +1684,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_dec(fib_entry->nh_group->key.fi);
break;
@@ -1751,8 +1724,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -1772,8 +1745,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
@@ -1785,10 +1758,10 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
enum mlxsw_reg_ralue_trap_action trap_action;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
u16 trap_id = 0;
u16 rif = 0;
@@ -1801,8 +1774,8 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1812,13 +1785,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1818,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = -EINVAL;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
break;
@@ -1877,17 +1850,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
{
struct fib_info *fi = fen_info->fi;
- if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
+ switch (fen_info->type) {
+ case RTN_BROADCAST: /* fall through */
+ case RTN_LOCAL:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
- }
- if (fen_info->type != RTN_UNICAST)
- return -EINVAL;
- if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ case RTN_UNREACHABLE: /* fall through */
+ case RTN_BLACKHOLE: /* fall through */
+ case RTN_PROHIBIT:
+ /* Packets hitting these routes need to be trapped, but
+ * can do so with a lower priority than packets directed
+ * at the host, so use action type local instead of trap.
+ */
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- else
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return 0;
+ return 0;
+ case RTN_UNICAST:
+ if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct mlxsw_sp_fib_entry *
@@ -1996,7 +1981,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
}
static struct mlxsw_sp_fib_node *
-mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
{
struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +1991,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
return NULL;
INIT_LIST_HEAD(&fib_node->entry_list);
- list_add(&fib_node->list, &vr->fib->node_list);
+ list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
- mlxsw_sp_fib_node_insert(vr->fib, fib_node);
- fib_node->vr = vr;
return fib_node;
}
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
- mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
list_del(&fib_node->list);
WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
@@ -2034,7 +2016,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2025,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
+static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ err = mlxsw_sp_fib_node_insert(fib, fib_node);
+ if (err)
+ return err;
+ fib_node->fib = fib;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+
+ if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+ err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
+ &req_prefix_usage);
+ if (err)
+ goto err_tree_check;
+ } else {
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return PTR_ERR(lpm_tree);
+ fib->lpm_tree = lpm_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
+ if (err)
+ goto err_tree_bind;
+ }
+
+ mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+ return 0;
+
+err_tree_bind:
+ fib->lpm_tree = NULL;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_tree_check:
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+ return err;
+}
+
+static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
+
+ mlxsw_sp_fib_node_prefix_dec(fib_node);
+
+ if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ fib->lpm_tree = NULL;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ } else {
+ mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
+ }
+
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+}
+
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
- MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
if (IS_ERR(vr))
return ERR_CAST(vr);
+ fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
- fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (fib_node)
return fib_node;
- fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (!fib_node) {
@@ -2076,22 +2124,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
goto err_fib_node_create;
}
+ err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+ if (err)
+ goto err_fib_node_init;
+
return fib_node;
+err_fib_node_init:
+ mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_vr *vr = fib_node->vr;
+ struct mlxsw_sp_vr *vr = fib_node->fib->vr;
if (!list_empty(&fib_node->entry_list))
return;
+ mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
}
static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2291,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_fib4_node_entry_add;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib4_node_entry_add:
@@ -2251,7 +2304,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
mlxsw_sp_fib4_node_list_remove(fib_entry);
}
@@ -2340,9 +2392,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
char ralst_pl[MLXSW_REG_RALST_LEN];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
- char ralue_pl[MLXSW_REG_RALUE_LEN];
- int err;
+ int i, err;
mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
MLXSW_SP_LPM_TREE_MIN);
@@ -2355,16 +2405,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
- MLXSW_SP_LPM_TREE_MIN);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
- if (err)
- return err;
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ if (!mlxsw_sp_vr_is_used(vr))
+ continue;
+
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ raltb_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
+ 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ ralue_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -2390,7 +2457,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- switch (fib_node->vr->proto) {
+ switch (fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
break;
@@ -2400,26 +2467,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
struct mlxsw_sp_fib_node *fib_node, *tmp;
- struct mlxsw_sp_vr *vr;
+
+ list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+ bool do_break = &tmp->list == &fib->node_list;
+
+ mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+ if (do_break)
+ break;
+ }
+}
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
continue;
-
- list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
- list) {
- bool do_break = &tmp->list == &vr->fib->node_list;
-
- mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
- if (do_break)
- break;
- }
+ mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
}
}
@@ -2437,70 +2510,6 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
-static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
-{
- char ritr_pl[MLXSW_REG_RITR_LEN];
- int err;
-
- mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (WARN_ON_ONCE(err))
- return err;
-
- mlxsw_reg_ritr_enable_set(ritr_pl, false);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
-{
- mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
- mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
- mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
-}
-
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
- u64 max_rifs;
- int err;
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
- return -EIO;
-
- max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
- GFP_KERNEL);
- if (!mlxsw_sp->rifs)
- return -ENOMEM;
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- if (err)
- goto err_rgcr_fail;
-
- return 0;
-
-err_rgcr_fail:
- kfree(mlxsw_sp->rifs);
- return err;
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
- int i;
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
- kfree(mlxsw_sp->rifs);
-}
-
struct mlxsw_sp_fib_event_work {
struct work_struct work;
union {
@@ -2594,6 +2603,614 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+ return mlxsw_sp->rifs[i];
+
+ return NULL;
+}
+
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ mlxsw_reg_ritr_enable_set(ritr_pl, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
+ mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
+ const struct in_device *in_dev,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ if (!r)
+ return true;
+ return false;
+ case NETDEV_DOWN:
+ if (r && !in_dev->ifa_list)
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+#define MLXSW_SP_INVALID_RIF 0xffff
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (!mlxsw_sp->rifs[i])
+ return i;
+
+ return MLXSW_SP_INVALID_RIF;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+ bool *p_lagged, u16 *p_system_port)
+{
+ u8 local_port = mlxsw_sp_vport->local_port;
+
+ *p_lagged = mlxsw_sp_vport->lagged;
+ *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+ u16 vr_id, struct net_device *l3_dev,
+ u16 rif, bool create)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ bool lagged = mlxsw_sp_vport->lagged;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 system_port;
+
+ mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, vr_id,
+ l3_dev->mtu, l3_dev->dev_addr);
+
+ mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+ mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
+{
+ return MLXSW_SP_RFID_BASE + rif;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->leave = mlxsw_sp_vport_rif_sp_leave;
+ f->ref_count = 0;
+ f->dev = l3_dev;
+ f->fid = fid;
+
+ return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif, u16 vr_id, struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ INIT_LIST_HEAD(&r->nexthop_list);
+ INIT_LIST_HEAD(&r->neigh_list);
+ ether_addr_copy(r->addr, l3_dev->dev_addr);
+ r->mtu = l3_dev->mtu;
+ r->vr_id = vr_id;
+ r->dev = l3_dev;
+ r->rif = rif;
+ r->f = f;
+
+ return r;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_rif *r;
+ u16 fid, rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_INVALID_RIF)
+ return ERR_PTR(-ERANGE);
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif,
+ true);
+ if (err)
+ goto err_vport_rif_sp_op;
+
+ fid = mlxsw_sp_rif_sp_to_fid(rif);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_rfid_alloc;
+ }
+
+ r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+ vr->rif_count++;
+
+ return r;
+
+err_rif_alloc:
+ kfree(f);
+err_rfid_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);
+err_vport_rif_sp_op:
+ mlxsw_sp_vr_put(vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 fid = f->fid;
+ u16 rif = r->rif;
+
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
+
+ vr->rif_count--;
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ kfree(f);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);
+
+ mlxsw_sp_vr_put(vr);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!r) {
+ r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+ }
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
+ r->f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ case NETDEV_DOWN:
+ mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ return 0;
+
+ return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+ event, vid);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ u16 fid;
+
+ if (is_vlan_dev(l3_dev))
+ fid = vlan_dev_vlan_id(l3_dev);
+ else if (mlxsw_sp->master_bridge.dev == l3_dev)
+ fid = 1;
+ else
+ return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+ return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
+ 1, MLXSW_PORT_ROUTER_PORT, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return MLXSW_REG_RITR_FID_IF;
+ else
+ return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
+ struct net_device *l3_dev,
+ u16 fid, u16 rif,
+ bool create)
+{
+ enum mlxsw_reg_ritr_if_type rif_type;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_type = mlxsw_sp_rif_type_get(fid);
+ mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
+ l3_dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_rif *r;
+ u16 rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_INVALID_RIF)
+ return -ERANGE;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+ if (err)
+ goto err_port_flood_set;
+
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif,
+ true);
+ if (err)
+ goto err_rif_bridge_op;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+ vr->rif_count++;
+
+ netdev_dbg(l3_dev, "RIF=%d created\n", rif);
+
+ return 0;
+
+err_rif_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+err_port_flood_set:
+ mlxsw_sp_vr_put(vr);
+ return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 rif = r->rif;
+
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
+
+ vr->rif_count--;
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);
+
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+ mlxsw_sp_vr_put(vr);
+
+ netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+ struct net_device *br_dev,
+ unsigned long event)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ struct mlxsw_sp_fid *f;
+
+ /* FID can either be an actual FID if the L3 device is the
+ * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+ * L3 device is a VLAN-unaware bridge and we get a vFID.
+ */
+ f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+ if (WARN_ON(!f))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_bridge_master(real_dev) &&
+ mlxsw_sp->master_bridge.dev == real_dev)
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+ event);
+
+ return 0;
+}
+
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(r, ifa->ifa_dev, event))
+ goto out;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_inetaddr_port_event(dev, event);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_inetaddr_lag_event(dev, event);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_inetaddr_vlan_event(dev, event);
+
+out:
+ return notifier_from_errno(err);
+}
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
+ const char *mac, int mtu)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!r)
+ return 0;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ ether_addr_copy(r->addr, dev->dev_addr);
+ r->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
+err_rif_edit:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
+ return err;
+}
+
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
@@ -2606,6 +3223,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
mlxsw_sp_router_fib_flush(mlxsw_sp);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ u64 max_rifs;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
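
The mlxsw router rework above repeatedly uses a get-or-create-with-reference-count pattern: mlxsw_sp_vr_get()/mlxsw_sp_vr_put() for virtual routers, the FID ref_count taken in mlxsw_sp_vport_rif_sp_join(), and the paired fib_node init/fini. Below is a minimal, self-contained sketch of that pattern in plain C; the names (vr_entry, vr_get, vr_put) and the fixed-size table are illustrative assumptions, not the driver's actual structures.

#include <stdio.h>
#include <string.h>

#define MAX_VRS 4

/* Hypothetical virtual-router entry: looked up by table id, freed on last put. */
struct vr_entry {
	unsigned int tb_id;
	unsigned int ref_count;	/* 0 means the slot is free */
};

static struct vr_entry vr_table[MAX_VRS];

static struct vr_entry *vr_get(unsigned int tb_id)
{
	struct vr_entry *free_slot = NULL;
	int i;

	for (i = 0; i < MAX_VRS; i++) {
		if (vr_table[i].ref_count && vr_table[i].tb_id == tb_id) {
			vr_table[i].ref_count++;	/* existing VR: just take a reference */
			return &vr_table[i];
		}
		if (!vr_table[i].ref_count && !free_slot)
			free_slot = &vr_table[i];
	}
	if (!free_slot)
		return NULL;			/* table exhausted */
	free_slot->tb_id = tb_id;
	free_slot->ref_count = 1;		/* create on first use */
	return free_slot;
}

static void vr_put(struct vr_entry *vr)
{
	if (--vr->ref_count == 0)
		memset(vr, 0, sizeof(*vr));	/* last user gone: release the slot */
}

int main(void)
{
	struct vr_entry *a = vr_get(254);	/* RT_TABLE_MAIN-like id */
	struct vr_entry *b = vr_get(254);	/* second user of the same table */

	printf("shared=%d refs=%u\n", a == b, a->ref_count);
	vr_put(b);
	vr_put(a);				/* slot released here */
	return 0;
}

The driver applies the same idea at several layers, so a RIF, FID or VR only goes away once its last user calls the matching put.
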
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 279ee4612981..20358f87de57 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -212,25 +212,6 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
}
/**
- * ks8851_rx_1msg - select whether to use one or two messages for spi read
- * @ks: The device structure
- *
- * Return whether to generate a single message with a tx and rx buffer
- * supplied to spi_sync(), or alternatively send the tx and rx buffers
- * as separate messages.
- *
- * Depending on the hardware in use, a single message may be more efficient
- * on interrupts or work done by the driver.
- *
- * This currently always returns true until we add some per-device data passed
- * from the platform code to specify which mode is better.
- */
-static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
-{
- return true;
-}
-
-/**
* ks8851_rdreg - issue read register command and return the data
* @ks: The device state
* @op: The register address and byte enables in message format.
@@ -251,14 +232,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (ks8851_rx_1msg(ks)) {
- msg = &ks->spi_msg1;
- xfer = &ks->spi_xfer1;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = trx;
- xfer->len = rxl + 2;
- } else {
+ if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
msg = &ks->spi_msg2;
xfer = ks->spi_xfer2;
@@ -270,15 +244,22 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
xfer->tx_buf = NULL;
xfer->rx_buf = trx;
xfer->len = rxl;
+ } else {
+ msg = &ks->spi_msg1;
+ xfer = &ks->spi_xfer1;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = rxl + 2;
}
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (ks8851_rx_1msg(ks))
- memcpy(rxb, trx + 2, rxl);
- else
+ else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
+ else
+ memcpy(rxb, trx + 2, rxl);
}
/**
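
The ks8851 change above drops the always-true ks8851_rx_1msg() helper and instead keys off the SPI controller's SPI_MASTER_HALF_DUPLEX flag: full-duplex masters keep the single transfer that carries both tx and rx buffers, half-duplex masters fall back to a write transfer followed by a read transfer. A rough sketch of the two message shapes with the generic SPI API is below; the function name and buffer layout are illustrative, not the driver's.

#include <linux/spi/spi.h>

/* Illustrative only: read @len bytes after sending a 2-byte command in @cmd. */
static int example_spi_read(struct spi_device *spi, void *cmd, void *rx, int len)
{
	struct spi_transfer xfer[2] = { };
	struct spi_message msg;

	spi_message_init(&msg);

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
		/* Half duplex: command and data must go in separate transfers. */
		xfer[0].tx_buf = cmd;
		xfer[0].len = 2;
		xfer[1].rx_buf = rx;
		xfer[1].len = len;
		spi_message_add_tail(&xfer[0], &msg);
		spi_message_add_tail(&xfer[1], &msg);
	} else {
		/* Full duplex: one transfer clocks the command out while data comes in,
		 * so the rx buffer must hold len + 2 bytes and payload starts at rx + 2.
		 */
		xfer[0].tx_buf = cmd;
		xfer[0].rx_buf = rx;
		xfer[0].len = len + 2;
		spi_message_add_tail(&xfer[0], &msg);
	}

	return spi_sync(spi, &msg);
}
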
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e614a376b595..4d45f4573b57 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -50,14 +50,14 @@
#include "nfp_net_ctrl.h"
-#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args)
-#define nn_warn_ratelimit(nn, fmt, args...) \
+#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
+#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
+#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
+#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_dp_warn(dp, fmt, args...) \
do { \
if (unlikely(net_ratelimit())) \
- netdev_warn((nn)->netdev, fmt, ## args); \
+ netdev_warn((dp)->netdev, fmt, ## args); \
} while (0)
/* Max time to wait for NFP to respond on updates (in seconds) */
@@ -112,6 +112,7 @@
/* Forward declarations */
struct nfp_cpp;
+struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
@@ -315,8 +316,6 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @bufsz: Buffer allocation size for convenience of management routines
- * (NOTE: this is in second cache line, do not use on fast path!)
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -338,7 +337,6 @@ struct nfp_net_rx_ring {
dma_addr_t dma;
unsigned int size;
- unsigned int bufsz;
} ____cacheline_aligned;
/**
@@ -433,19 +431,76 @@ struct nfp_stat_pair {
};
/**
- * struct nfp_net - NFP network device structure
- * @pdev: Backpointer to PCI device
- * @netdev: Backpointer to net_device structure
- * @is_vf: Is the driver attached to a VF?
+ * struct nfp_net_dp - NFP network device datapath data structure
+ * @dev: Backpointer to struct device
+ * @netdev: Backpointer to net_device structure
+ * @is_vf: Is the driver attached to a VF?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
- * @ctrl: Local copy of the control register/word.
- * @fl_bufsz: Currently configured size of the freelist buffers
+ * @chained_metadata_format: Firmware will use new metadata format
+ * @rx_dma_dir: Mapping direction for RX buffers
+ * @rx_dma_off: Offset at which packets are DMA mapped (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
+ * @ctrl: Local copy of the control register/word.
+ * @fl_bufsz: Currently configured size of the freelist buffers
* @xdp_prog: Installed XDP program
- * @fw_ver: Firmware version
+ * @tx_rings: Array of pre-allocated TX ring structures
+ * @rx_rings: Array of pre-allocated RX ring structures
+ * @ctrl_bar: Pointer to mapped control BAR
+ *
+ * @txd_cnt: Size of the TX ring in number of descriptors
+ * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @num_r_vecs: Number of used ring vectors
+ * @num_tx_rings: Currently configured number of TX rings
+ * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
+ * @num_rx_rings: Currently configured number of RX rings
+ * @mtu: Device MTU
+ */
+struct nfp_net_dp {
+ struct device *dev;
+ struct net_device *netdev;
+
+ u8 is_vf:1;
+ u8 bpf_offload_skip_sw:1;
+ u8 bpf_offload_xdp:1;
+ u8 chained_metadata_format:1;
+
+ u8 rx_dma_dir;
+ u8 rx_dma_off;
+
+ u8 rx_offset;
+
+ u32 ctrl;
+ u32 fl_bufsz;
+
+ struct bpf_prog *xdp_prog;
+
+ struct nfp_net_tx_ring *tx_rings;
+ struct nfp_net_rx_ring *rx_rings;
+
+ u8 __iomem *ctrl_bar;
+
+ /* Cold data follows */
+
+ unsigned int txd_cnt;
+ unsigned int rxd_cnt;
+
+ unsigned int num_r_vecs;
+
+ unsigned int num_tx_rings;
+ unsigned int num_stack_tx_rings;
+ unsigned int num_rx_rings;
+
+ unsigned int mtu;
+};
+
+/**
+ * struct nfp_net - NFP network device structure
+ * @dp: Datapath structure
+ * @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
 * @max_mtu: Maximum supported MTU advertised by the Firmware
+ * @rss_hfunc: RSS selected hash function
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
@@ -454,17 +509,9 @@ struct nfp_stat_pair {
* @rx_filter_change: Jiffies when statistics last changed
* @rx_filter_stats_timer: Timer for polling filter offload statistics
* @rx_filter_lock: Lock protecting timer state changes (teardown)
+ * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
- * @num_tx_rings: Currently configured number of TX rings
- * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
- * @num_rx_rings: Currently configured number of RX rings
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
- * @tx_rings: Array of pre-allocated TX ring structures
- * @rx_rings: Array of pre-allocated RX ring structures
- * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
- * @num_r_vecs: Number of used ring vectors
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -488,36 +535,24 @@ struct nfp_stat_pair {
* @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW
* @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts
* @qcp_cfg: Pointer to QCP queue used for configuration notification
- * @ctrl_bar: Pointer to mapped control BAR
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
* @port_list: Entry on device port list
+ * @pdev: Backpointer to PCI device
* @cpp: CPP device handle if available
+ * @eth_port: Translated ETH Table port entry
*/
struct nfp_net {
- struct pci_dev *pdev;
- struct net_device *netdev;
-
- unsigned is_vf:1;
- unsigned bpf_offload_skip_sw:1;
- unsigned bpf_offload_xdp:1;
-
- u32 ctrl;
- u32 fl_bufsz;
-
- u32 rx_offset;
-
- struct bpf_prog *xdp_prog;
-
- struct nfp_net_tx_ring *tx_rings;
- struct nfp_net_rx_ring *rx_rings;
+ struct nfp_net_dp dp;
struct nfp_net_fw_version fw_ver;
+
u32 cap;
u32 max_mtu;
+ u8 rss_hfunc;
u32 rss_cfg;
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
@@ -530,18 +565,10 @@ struct nfp_net {
unsigned int max_tx_rings;
unsigned int max_rx_rings;
- unsigned int num_tx_rings;
- unsigned int num_stack_tx_rings;
- unsigned int num_rx_rings;
-
int stride_tx;
int stride_rx;
- int txd_cnt;
- int rxd_cnt;
-
unsigned int max_r_vecs;
- unsigned int num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
@@ -575,7 +602,6 @@ struct nfp_net {
u8 __iomem *qcp_cfg;
- u8 __iomem *ctrl_bar;
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
@@ -584,14 +610,10 @@ struct nfp_net {
struct list_head port_list;
+ struct pci_dev *pdev;
struct nfp_cpp *cpp;
-};
-struct nfp_net_ring_set {
- unsigned int n_rings;
- unsigned int mtu;
- unsigned int dcnt;
- void *rings;
+ struct nfp_eth_table_port *eth_port;
};
/* Functions to read/write from/to a BAR
@@ -599,42 +621,42 @@ struct nfp_net_ring_set {
*/
static inline u16 nn_readb(struct nfp_net *nn, int off)
{
- return readb(nn->ctrl_bar + off);
+ return readb(nn->dp.ctrl_bar + off);
}
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
- writeb(val, nn->ctrl_bar + off);
+ writeb(val, nn->dp.ctrl_bar + off);
}
static inline u16 nn_readw(struct nfp_net *nn, int off)
{
- return readw(nn->ctrl_bar + off);
+ return readw(nn->dp.ctrl_bar + off);
}
static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
- writew(val, nn->ctrl_bar + off);
+ writew(val, nn->dp.ctrl_bar + off);
}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
- return readl(nn->ctrl_bar + off);
+ return readl(nn->dp.ctrl_bar + off);
}
static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
- writel(val, nn->ctrl_bar + off);
+ writel(val, nn->dp.ctrl_bar + off);
}
static inline u64 nn_readq(struct nfp_net *nn, int off)
{
- return readq(nn->ctrl_bar + off);
+ return readq(nn->dp.ctrl_bar + off);
}
static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
- writeq(val, nn->ctrl_bar + off);
+ writeq(val, nn->dp.ctrl_bar + off);
}
/* Flush posted PCI writes by reading something without side effects */
@@ -776,6 +798,7 @@ void nfp_net_netdev_clean(struct net_device *netdev);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
@@ -787,9 +810,9 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new);
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
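
The header rework above moves everything the packet path touches into struct nfp_net_dp and replaces nfp_net_ring_reconfig()'s ring-set arguments with a cloned datapath structure. A plausible usage sketch based only on the two prototypes above; the helper name and the assumption that the core consumes @new (including on error) are mine, not taken from the sources shown here:

/* Sketch: change the RX/TX descriptor counts by staging them in a cloned
 * datapath structure and letting the core swap it in atomically.
 */
static int example_set_ring_size(struct nfp_net *nn, unsigned int txd, unsigned int rxd)
{
	struct nfp_net_dp *new;

	new = nfp_net_clone_dp(nn);	/* copy of nn->dp the caller may edit freely */
	if (!new)
		return -ENOMEM;

	new->txd_cnt = txd;
	new->rxd_cnt = rxd;

	/* Validates the staged config, allocates new rings, swaps them in. */
	return nfp_net_ring_reconfig(nn, new);
}

Staging all changes in one copy keeps the live structure stable from the datapath's point of view until the swap happens.
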
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9179a99563af..f134f1808b9a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,6 +41,7 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
+#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
@@ -66,6 +67,7 @@
#include <net/pkt_cls.h>
#include <net/vxlan.h>
+#include "nfpcore/nfp_nsp_eth.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
@@ -83,20 +85,18 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
-static dma_addr_t
-nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
- int direction)
+static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
- return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
- bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+ return dma_map_single(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
}
-static void
-nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
- unsigned int bufsz, int direction)
+static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
- dma_unmap_single(&nn->pdev->dev, dma_addr,
- bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+ dma_unmap_single(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
}
/* Firmware reconfig
@@ -327,19 +327,22 @@ void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n)
{
+ struct nfp_net_dp *dp = &nn->dp;
+
nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
- nn->num_r_vecs = nn->max_r_vecs;
+ dp->num_r_vecs = nn->max_r_vecs;
memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
- if (nn->num_rx_rings > nn->num_r_vecs ||
- nn->num_tx_rings > nn->num_r_vecs)
+ if (dp->num_rx_rings > dp->num_r_vecs ||
+ dp->num_tx_rings > dp->num_r_vecs)
nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
- nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+ dp->num_rx_rings, dp->num_tx_rings,
+ dp->num_r_vecs);
- nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
- nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
- nn->num_stack_tx_rings = nn->num_tx_rings;
+ dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
+ dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
+ dp->num_stack_tx_rings = dp->num_tx_rings;
}
/**
@@ -394,11 +397,11 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
nn->link_up = link_up;
if (nn->link_up) {
- netif_carrier_on(nn->netdev);
- netdev_info(nn->netdev, "NIC Link is Up\n");
+ netif_carrier_on(nn->dp.netdev);
+ netdev_info(nn->dp.netdev, "NIC Link is Up\n");
} else {
- netif_carrier_off(nn->netdev);
- netdev_info(nn->netdev, "NIC Link is Down\n");
+ netif_carrier_off(nn->dp.netdev);
+ netdev_info(nn->dp.netdev, "NIC Link is Down\n");
}
out:
spin_unlock_irqrestore(&nn->link_status_lock, flags);
@@ -530,7 +533,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
entry = &nn->irq_entries[vector_idx];
- snprintf(name, name_sz, format, netdev_name(nn->netdev));
+ snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
err = request_irq(entry->vector, handler, 0, name, nn);
if (err) {
nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -617,7 +620,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
/**
* nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @nn: NFP Net device
* @r_vec: per-ring structure
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to HW TX descriptor
@@ -626,7 +628,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
* Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
* Return error on packet header greater than maximum supported LSO header size.
*/
-static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
@@ -657,7 +659,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
/**
* nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to TX descriptor
@@ -666,7 +668,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
* This function sets the TX checksum flags in the TX descriptor based
* on the configuration and the protocol of the packet to be transmitted.
*/
-static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_csum(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
@@ -674,7 +677,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct iphdr *iph;
u8 l4_hdr;
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
return;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -693,8 +696,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
} else if (ipv6h->version == 6) {
l4_hdr = ipv6h->nexthdr;
} else {
- nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
- iph->version);
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
return;
}
@@ -706,8 +708,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txd->flags |= PCIE_DESC_TX_UDP_CSUM;
break;
default:
- nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
return;
}
@@ -737,27 +738,29 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
- struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_desc *txd, txdg;
- struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ struct nfp_net_tx_buf *txbuf;
struct netdev_queue *nd_q;
+ struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
int f, nr_frags;
int wr_idx;
u16 qidx;
+ dp = &nn->dp;
qidx = skb_get_queue_mapping(skb);
- tx_ring = &nn->tx_rings[qidx];
+ tx_ring = &dp->tx_rings[qidx];
r_vec = tx_ring->r_vec;
- nd_q = netdev_get_tx_queue(nn->netdev, qidx);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
- nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
- qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
netif_tx_stop_queue(nd_q);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_busy++;
@@ -766,9 +769,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
}
/* Start with the head skbuf */
- dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
@@ -792,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->mss = 0;
txd->l4_offset = 0;
- nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
+ nfp_net_tx_tso(r_vec, txbuf, txd, skb);
- nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
+ nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
- if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
@@ -810,9 +813,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[f];
fsize = skb_frag_size(frag);
- dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(dp->dev, dma_addr))
goto err_unmap;
wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
@@ -851,8 +854,7 @@ err_unmap:
--f;
while (f >= 0) {
frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
@@ -861,13 +863,13 @@ err_unmap:
if (wr_idx < 0)
wr_idx += tx_ring->cnt;
}
- dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
- nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
@@ -884,7 +886,7 @@ err_free:
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
u32 done_pkts = 0, done_bytes = 0;
@@ -918,8 +920,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
if (fidx == -1) {
/* unmap head */
- dma_unmap_single(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
done_pkts += tx_ring->txbufs[idx].pkt_cnt;
@@ -927,8 +928,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
@@ -948,7 +948,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
- nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
if (nfp_net_tx_ring_should_wake(tx_ring)) {
/* Make sure TX thread will see updated tx_ring->rd_p */
@@ -966,7 +966,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
u32 done_pkts = 0, done_bytes = 0;
int idx, todo;
u32 qcp_rd_p;
@@ -989,8 +989,7 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
if (!tx_ring->txbufs[idx].frag)
continue;
- nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_BIDIRECTIONAL);
+ nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr);
__free_page(virt_to_page(tx_ring->txbufs[idx].frag));
done_pkts++;
@@ -1015,17 +1014,16 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
* Assumes that the device is stopped
*/
static void
-nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag;
- struct pci_dev *pdev = nn->pdev;
struct netdev_queue *nd_q;
while (tx_ring->rd_p != tx_ring->wr_p) {
@@ -1036,8 +1034,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
tx_buf = &tx_ring->txbufs[idx];
if (tx_ring == r_vec->xdp_ring) {
- nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
- nn->fl_bufsz, DMA_BIDIRECTIONAL);
+ nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr);
__free_page(virt_to_page(tx_ring->txbufs[idx].frag));
} else {
struct sk_buff *skb = tx_ring->txbufs[idx].skb;
@@ -1045,13 +1042,13 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
if (tx_buf->fidx == -1) {
/* unmap head */
- dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
skb_headlen(skb),
DMA_TO_DEVICE);
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
skb_frag_size(frag),
DMA_TO_DEVICE);
}
@@ -1078,7 +1075,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
if (tx_ring == r_vec->xdp_ring)
return;
- nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -1087,7 +1084,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
+ for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
continue;
nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1098,16 +1095,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
/* Receive processing
*/
static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
+nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
unsigned int fl_bufsz;
fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ fl_bufsz += dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
fl_bufsz += NFP_NET_MAX_PREPEND;
else
- fl_bufsz += nn->rx_offset;
- fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
+ fl_bufsz += dp->rx_offset;
+ fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1126,62 +1124,56 @@ nfp_net_free_frag(void *frag, bool xdp)
/**
* nfp_net_rx_alloc_one() - Allocate and map page frag for RX
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param)
- * @fl_bufsz: size of freelist buffers
- * @xdp: Whether XDP is enabled
*
 * This function will allocate a new page frag and map it for DMA.
*
* Return: allocated page frag or NULL on failure.
*/
static void *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
- unsigned int fl_bufsz, bool xdp)
+nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ dma_addr_t *dma_addr)
{
- struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- int direction;
void *frag;
- if (!xdp)
- frag = netdev_alloc_frag(fl_bufsz);
+ if (!dp->xdp_prog)
+ frag = netdev_alloc_frag(dp->fl_bufsz);
else
frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
if (!frag) {
- nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
}
- direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
- *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- nfp_net_free_frag(frag, xdp);
- nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
return NULL;
}
return frag;
}
-static void *
-nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
+static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!nn->xdp_prog)
- frag = napi_alloc_frag(nn->fl_bufsz);
+ if (!dp->xdp_prog)
+ frag = napi_alloc_frag(dp->fl_bufsz);
else
frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
if (!frag) {
- nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
}
- *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- nfp_net_free_frag(frag, nn->xdp_prog);
- nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
return NULL;
}
@@ -1190,11 +1182,13 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
/**
* nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring structure
* @frag: page fragment buffer
* @dma_addr: DMA address of skb mapping
*/
-static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
+static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
void *frag, dma_addr_t dma_addr)
{
unsigned int wr_idx;
@@ -1208,7 +1202,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
rx_ring->wr_ptr_add++;
@@ -1249,19 +1244,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to remove buffers from
- * @xdp: Whether XDP is enabled
*
* Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
* entries. After device is disabled nfp_net_rx_ring_reset() must be called
* to restore required ring geometry.
*/
static void
-nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
- bool xdp)
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
- int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1272,9 +1265,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
if (!rx_ring->rxbufs[i].frag)
continue;
- nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
- rx_ring->bufsz, direction);
- nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
+ nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
rx_ring->rxbufs[i].dma_addr = 0;
rx_ring->rxbufs[i].frag = NULL;
}
@@ -1282,13 +1274,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
  * @rx_ring: RX ring to fill with buffers
- * @xdp: Whether XDP is enabled
*/
static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
- bool xdp)
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
struct nfp_net_rx_buf *rxbufs;
unsigned int i;
@@ -1297,10 +1288,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
for (i = 0; i < rx_ring->cnt - 1; i++) {
rxbufs[i].frag =
- nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
- rx_ring->bufsz, xdp);
+ nfp_net_rx_alloc_one(dp, rx_ring, &rxbufs[i].dma_addr);
if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
+ nfp_net_rx_ring_bufs_free(dp, rx_ring);
return -ENOMEM;
}
}
@@ -1310,14 +1300,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to fill
*/
-static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag,
+ nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
rx_ring->rxbufs[i].dma_addr);
}
@@ -1337,17 +1330,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
/**
* nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @rxd: Pointer to RX descriptor
* @skb: Pointer to SKB
*/
-static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_rx_csum(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
- if (!(nn->netdev->features & NETIF_F_RXCSUM))
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
return;
if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
@@ -1398,24 +1392,21 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
static void
nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+ void *data, struct nfp_net_rx_desc *rxd)
{
- struct nfp_net_rx_hash *rx_hash;
+ struct nfp_net_rx_hash *rx_hash = data;
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
&rx_hash->hash);
}
static void *
nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
- int meta_len)
+ void *data, int meta_len)
{
- u8 *data = skb->data - meta_len;
u32 meta_info;
meta_info = get_unaligned_be32(data);
@@ -1445,8 +1436,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
}
static void
-nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb)
+nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
{
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++;
@@ -1458,15 +1450,15 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
if (skb && rxbuf && skb->head == rxbuf->frag)
page_ref_inc(virt_to_head_page(rxbuf->frag));
if (rxbuf)
- nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
if (skb)
dev_kfree_skb_any(skb);
}
static bool
-nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
unsigned int pkt_len)
{
struct nfp_net_tx_buf *txbuf;
@@ -1476,16 +1468,16 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
return false;
}
- new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
+ new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) {
- nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
return false;
}
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
@@ -1497,14 +1489,14 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
txbuf->pkt_cnt = 1;
txbuf->real_len = pkt_len;
- dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
pkt_len, DMA_BIDIRECTIONAL);
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = PCIE_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -1516,14 +1508,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
+static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
+ unsigned int *off, unsigned int *len)
{
struct xdp_buff xdp;
+ void *orig_data;
+ int ret;
+
+ xdp.data_hard_start = hard_start;
+ xdp.data = data + *off;
+ xdp.data_end = data + *off + *len;
- xdp.data = data;
- xdp.data_end = data + len;
+ orig_data = xdp.data;
+ ret = bpf_prog_run_xdp(prog, &xdp);
- return bpf_prog_run_xdp(prog, &xdp);
+ *len -= xdp.data - orig_data;
+ *off += xdp.data - orig_data;
+
+ return ret;
}
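
The *off and *len adjustments above exist because an attached program may call bpf_xdp_adjust_head() and move xdp->data within the headroom before the driver builds the skb. A minimal, illustrative XDP program exercising that path is sketched below; the program, its section name, the 4-byte delta and the libbpf include path are assumptions made for the example, not part of this patch.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* Illustrative only: reserve 4 extra bytes of packet headroom.
	 * On success xdp->data moves 4 bytes towards data_hard_start, so in
	 * nfp_net_run_xdp() above xdp.data - orig_data == -4: *len grows by 4
	 * and *off shrinks by 4 before the skb is built.
	 */
	SEC("xdp")
	int xdp_reserve_headroom(struct xdp_md *ctx)
	{
		if (bpf_xdp_adjust_head(ctx, -4))
			return XDP_DROP;	/* not enough headroom left */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";
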
/**
@@ -1540,27 +1542,27 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
- int rx_dma_map_dir;
int idx;
rcu_read_lock();
- xdp_prog = READ_ONCE(nn->xdp_prog);
- rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
- unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
+ unsigned int meta_len, data_len, data_off, pkt_len;
+ u8 meta_prepend[NFP_NET_MAX_PREPEND];
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
dma_addr_t new_dma_addr;
void *new_frag;
+ u8 *meta;
idx = rx_ring->rd_p & (rx_ring->cnt - 1);
@@ -1593,11 +1595,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len;
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off = meta_len;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ data_off = NFP_NET_RX_BUF_HEADROOM + meta_len;
else
- pkt_off = nn->rx_offset;
- data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;
+ data_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_offset;
+ data_off += dp->rx_dma_off;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
@@ -1605,30 +1607,55 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
+ /* Pointer to start of metadata */
+ meta = rxbuf->frag + data_off - meta_len;
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- nn->bpf_offload_xdp)) {
+ dp->bpf_offload_xdp)) {
+ unsigned int dma_off;
+ void *hard_start;
int act;
- dma_sync_single_for_cpu(&nn->pdev->dev,
- rxbuf->dma_addr + pkt_off,
- pkt_len, DMA_BIDIRECTIONAL);
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
- pkt_len);
+ hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
+ dma_sync_single_for_cpu(dp->dev, rxbuf->dma_addr,
+ dma_off + pkt_len,
+ DMA_BIDIRECTIONAL);
+
+ /* Move prepend out of the way */
+ if (xdp_prog->xdp_adjust_head) {
+ memcpy(meta_prepend, meta, meta_len);
+ meta = meta_prepend;
+ }
+
+ act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
+ &data_off, &pkt_len);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+ dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
- pkt_off, pkt_len)))
- trace_xdp_exception(nn->netdev, xdp_prog, act);
+ dma_off,
+ pkt_len)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
- trace_xdp_exception(nn->netdev, xdp_prog, act);
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
case XDP_DROP:
- nfp_net_rx_give_one(rx_ring, rxbuf->frag,
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
continue;
}
@@ -1636,41 +1663,40 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue;
}
- new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
- &new_dma_addr);
+ new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
continue;
}
- nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
- rx_dma_map_dir);
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
skb_reserve(skb, data_off);
skb_put(skb, pkt_len);
- if (nn->fw_ver.major <= 3) {
- nfp_net_set_hash_desc(nn->netdev, skb, rxd);
+ if (!dp->chained_metadata_format) {
+ nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd);
} else if (meta_len) {
void *end;
- end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
- if (unlikely(end != skb->data)) {
- nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
- nfp_net_rx_drop(r_vec, rx_ring, NULL, skb);
+ end = nfp_net_parse_meta(dp->netdev, skb, meta,
+ meta_len);
+ if (unlikely(end != meta + meta_len)) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
continue;
}
}
skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, nn->netdev);
+ skb->protocol = eth_type_trans(skb, dp->netdev);
- nfp_net_rx_csum(nn, r_vec, rxd, skb);
+ nfp_net_rx_csum(dp, r_vec, rxd, skb);
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1707,10 +1733,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
nfp_net_xdp_complete(r_vec->xdp_ring);
}
- if (pkts_polled < budget) {
- napi_complete_done(napi, pkts_polled);
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- }
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
return pkts_polled;
}
@@ -1725,13 +1750,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kfree(tx_ring->txbufs);
if (tx_ring->txds)
- dma_free_coherent(&pdev->dev, tx_ring->size,
+ dma_free_coherent(dp->dev, tx_ring->size,
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
@@ -1743,24 +1767,23 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
+ * @dp: NFP Net data path struct
* @tx_ring: TX Ring structure to allocate
- * @cnt: Ring buffer count
* @is_xdp: True if ring will be used for XDP
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring,
+ bool is_xdp)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = cnt;
+ tx_ring->cnt = dp->txd_cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
- tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
@@ -1771,14 +1794,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
goto err_alloc;
if (!is_xdp)
- netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
- nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
- tx_ring->idx, tx_ring->qcidx,
- tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
- is_xdp ? "XDP" : "");
-
return 0;
err_alloc:
@@ -1786,62 +1804,45 @@ err_alloc:
return -ENOMEM;
}
-static struct nfp_net_tx_ring *
-nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
- unsigned int num_stack_tx_rings)
+static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- struct nfp_net_tx_ring *rings;
unsigned int r;
- rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
- if (!rings)
- return NULL;
+ dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+ GFP_KERNEL);
+ if (!dp->tx_rings)
+ return -ENOMEM;
- for (r = 0; r < s->n_rings; r++) {
+ for (r = 0; r < dp->num_tx_rings; r++) {
int bias = 0;
- if (r >= num_stack_tx_rings)
- bias = num_stack_tx_rings;
+ if (r >= dp->num_stack_tx_rings)
+ bias = dp->num_stack_tx_rings;
- nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);
+ nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
+ r);
- if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
+ if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r], bias))
goto err_free_prev;
}
- return s->rings = rings;
+ return 0;
err_free_prev:
while (r--)
- nfp_net_tx_ring_free(&rings[r]);
- kfree(rings);
- return NULL;
-}
-
-static void
-nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
-{
- struct nfp_net_ring_set new = *s;
-
- s->dcnt = nn->txd_cnt;
- s->rings = nn->tx_rings;
- s->n_rings = nn->num_tx_rings;
-
- nn->txd_cnt = new.dcnt;
- nn->tx_rings = new.rings;
- nn->num_tx_rings = new.n_rings;
+ nfp_net_tx_ring_free(&dp->tx_rings[r]);
+ kfree(dp->tx_rings);
+ return -ENOMEM;
}
-static void
-nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
+static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
- struct nfp_net_tx_ring *rings = s->rings;
unsigned int r;
- for (r = 0; r < s->n_rings; r++)
- nfp_net_tx_ring_free(&rings[r]);
+ for (r = 0; r < dp->num_tx_rings; r++)
+ nfp_net_tx_ring_free(&dp->tx_rings[r]);
- kfree(rings);
+ kfree(dp->tx_rings);
}
/**
@@ -1851,13 +1852,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
- dma_free_coherent(&pdev->dev, rx_ring->size,
+ dma_free_coherent(dp->dev, rx_ring->size,
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
@@ -1869,26 +1869,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to allocate
- * @fl_bufsz: Size of buffers to allocate
- * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
- u32 cnt)
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = cnt;
- rx_ring->bufsz = fl_bufsz;
-
+ rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
- rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
@@ -1898,10 +1891,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
if (!rx_ring->rxbufs)
goto err_alloc;
- nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
- rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
- rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
-
return 0;
err_alloc:
@@ -1909,82 +1898,59 @@ err_alloc:
return -ENOMEM;
}
-static struct nfp_net_rx_ring *
-nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
- bool xdp)
+static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
- struct nfp_net_rx_ring *rings;
unsigned int r;
- rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
- if (!rings)
- return NULL;
+ dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+ GFP_KERNEL);
+ if (!dp->rx_rings)
+ return -ENOMEM;
- for (r = 0; r < s->n_rings; r++) {
- nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
- if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
+ if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
goto err_free_prev;
- if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
+ if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
goto err_free_ring;
}
- return s->rings = rings;
+ return 0;
err_free_prev:
while (r--) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
- nfp_net_rx_ring_free(&rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
}
- kfree(rings);
- return NULL;
-}
-
-static void
-nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
-{
- struct nfp_net_ring_set new = *s;
-
- s->mtu = nn->netdev->mtu;
- s->dcnt = nn->rxd_cnt;
- s->rings = nn->rx_rings;
- s->n_rings = nn->num_rx_rings;
-
- nn->netdev->mtu = new.mtu;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
- nn->rxd_cnt = new.dcnt;
- nn->rx_rings = new.rings;
- nn->num_rx_rings = new.n_rings;
+ kfree(dp->rx_rings);
+ return -ENOMEM;
}
-static void
-nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
- bool xdp)
+static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
- struct nfp_net_rx_ring *rings = s->rings;
unsigned int r;
- for (r = 0; r < s->n_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
- nfp_net_rx_ring_free(&rings[r]);
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
}
- kfree(rings);
+ kfree(dp->rx_rings);
}
static void
-nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- int idx)
+nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, int idx)
{
- r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+ r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
r_vec->tx_ring =
- idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
+ idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
- r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
- &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
+ r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
+ &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
}
static int
@@ -1994,11 +1960,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int err;
/* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
+ netif_napi_add(nn->dp.netdev, &r_vec->napi,
nfp_net_poll, NAPI_POLL_WEIGHT);
snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, idx);
+ "%s-rxtx-%d", nn->dp.netdev->name, idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
@@ -2045,7 +2011,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn)
{
int i;
- for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
+ for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
get_unaligned_le32(nn->rss_key + i));
}
@@ -2069,13 +2035,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
(factor * nn->rx_coalesce_usecs);
- for (i = 0; i < nn->num_rx_rings; i++)
+ for (i = 0; i < nn->dp.num_rx_rings; i++)
nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
/* copy TX interrupt coalesce parameters */
value = (nn->tx_coalesce_max_frames << 16) |
(factor * nn->tx_coalesce_usecs);
- for (i = 0; i < nn->num_tx_rings; i++)
+ for (i = 0; i < nn->dp.num_tx_rings; i++)
nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}
@@ -2090,9 +2056,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
- get_unaligned_be32(nn->netdev->dev_addr));
+ get_unaligned_be32(nn->dp.netdev->dev_addr));
nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
- get_unaligned_be16(nn->netdev->dev_addr + 4));
+ get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
}
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2116,7 +2082,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
unsigned int r;
int err;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
update = NFP_NET_CFG_UPDATE_GEN;
update |= NFP_NET_CFG_UPDATE_MSIX;
@@ -2133,14 +2099,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_reset(&nn->rx_rings[r]);
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
- for (r = 0; r < nn->num_r_vecs; r++)
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
}
static void
@@ -2168,7 +2134,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
unsigned int r;
int err;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
nfp_net_rss_write_key(nn);
@@ -2184,22 +2150,22 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
+ 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
+ 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
nfp_net_write_mac_addr(nn);
- nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
- nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
+ nn->dp.fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
/* Enable device */
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
@@ -2212,18 +2178,18 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
/* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it.
*/
- if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- udp_tunnel_get_rx_info(nn->netdev);
+ udp_tunnel_get_rx_info(nn->dp.netdev);
}
return err;
@@ -2252,12 +2218,12 @@ static void nfp_net_open_stack(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
napi_enable(&nn->r_vecs[r].napi);
enable_irq(nn->r_vecs[r].irq_vector);
}
- netif_tx_wake_all_queues(nn->netdev);
+ netif_tx_wake_all_queues(nn->dp.netdev);
enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
@@ -2266,19 +2232,10 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = nn->txd_cnt,
- };
int err, r;
- if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
- nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE) {
+ nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->dp.ctrl);
return -EBUSY;
}
@@ -2299,33 +2256,28 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err)
goto err_cleanup_vec_p;
}
- nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
- if (!nn->rx_rings) {
- err = -ENOMEM;
+ err = nfp_net_rx_rings_prepare(nn, &nn->dp);
+ if (err)
goto err_cleanup_vec;
- }
- nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
- nn->num_stack_tx_rings);
- if (!nn->tx_rings) {
- err = -ENOMEM;
+ err = nfp_net_tx_rings_prepare(nn, &nn->dp);
+ if (err)
goto err_free_rx_rings;
- }
for (r = 0; r < nn->max_r_vecs; r++)
- nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+ nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
+ err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
if (err)
goto err_free_rings;
- err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+ err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
if (err)
goto err_free_rings;
@@ -2351,11 +2303,11 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0;
err_free_rings:
- nfp_net_tx_ring_set_free(nn, &tx);
+ nfp_net_tx_rings_free(&nn->dp);
err_free_rx_rings:
- nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
+ nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
- r = nn->num_r_vecs;
+ r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
@@ -2374,15 +2326,15 @@ static void nfp_net_close_stack(struct nfp_net *nn)
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- netif_carrier_off(nn->netdev);
+ netif_carrier_off(nn->dp.netdev);
nn->link_up = false;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
disable_irq(nn->r_vecs[r].irq_vector);
napi_disable(&nn->r_vecs[r].napi);
}
- netif_tx_disable(nn->netdev);
+ netif_tx_disable(nn->dp.netdev);
}
/**
@@ -2393,17 +2345,17 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
- nfp_net_rx_ring_free(&nn->rx_rings[r]);
+ for (r = 0; r < nn->dp.num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
+ nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
}
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_free(&nn->tx_rings[r]);
- for (r = 0; r < nn->num_r_vecs; r++)
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(nn->rx_rings);
- kfree(nn->tx_rings);
+ kfree(nn->dp.rx_rings);
+ kfree(nn->dp.tx_rings);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2417,8 +2369,8 @@ static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
- nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
+ nn_err(nn, "Dev is not up: 0x%08x\n", nn->dp.ctrl);
return 0;
}
@@ -2443,7 +2395,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
u32 new_ctrl;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
@@ -2454,13 +2406,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
}
- if (new_ctrl == nn->ctrl)
+ if (new_ctrl == nn->dp.ctrl)
return;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
}
static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -2469,42 +2421,39 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn)
for (i = 0; i < sizeof(nn->rss_itbl); i++)
nn->rss_itbl[i] =
- ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+ ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}
-static int
-nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
- unsigned int *stack_tx_rings,
- struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx,
- struct nfp_net_ring_set *tx)
+static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ struct nfp_net_dp new_dp = *dp;
+
+ *dp = nn->dp;
+ nn->dp = new_dp;
+
+ nn->dp.netdev->mtu = new_dp.mtu;
+
+ if (!netif_is_rxfh_configured(nn->dp.netdev))
+ nfp_net_rss_init_itbl(nn);
+}
+
+static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
unsigned int r;
int err;
- if (rx)
- nfp_net_rx_ring_set_swap(nn, rx);
- if (tx)
- nfp_net_tx_ring_set_swap(nn, tx);
-
- swap(*num_vecs, nn->num_r_vecs);
- swap(*stack_tx_rings, nn->num_stack_tx_rings);
- *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+ nfp_net_dp_swap(nn, dp);
for (r = 0; r < nn->max_r_vecs; r++)
- nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+ nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- if (!netif_is_rxfh_configured(nn->netdev))
- nfp_net_rss_init_itbl(nn);
-
- err = netif_set_real_num_rx_queues(nn->netdev,
- nn->num_rx_rings);
+ err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
if (err)
return err;
- if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
- err = netif_set_real_num_tx_queues(nn->netdev,
- nn->num_stack_tx_rings);
+ if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
+ err = netif_set_real_num_tx_queues(nn->dp.netdev,
+ nn->dp.num_stack_tx_rings);
if (err)
return err;
}
@@ -2512,18 +2461,36 @@ nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
return __nfp_net_set_config_and_enable(nn);
}
-static int
-nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
+{
+ struct nfp_net_dp *new;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ *new = nn->dp;
+
+ /* Clear things which need to be recomputed */
+ new->fl_bufsz = 0;
+ new->tx_rings = NULL;
+ new->rx_rings = NULL;
+ new->num_r_vecs = 0;
+ new->num_stack_tx_rings = 0;
+
+ return new;
+}
+
+static int nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp)
{
/* XDP-enabled tests */
- if (!xdp_prog)
+ if (!dp->xdp_prog)
return 0;
- if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
+ if (dp->fl_bufsz > PAGE_SIZE) {
nn_warn(nn, "MTU too large w/ XDP enabled\n");
return -EINVAL;
}
- if (tx && tx->n_rings > nn->max_tx_rings) {
+ if (dp->num_tx_rings > nn->max_tx_rings) {
nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
return -EINVAL;
}
@@ -2531,119 +2498,94 @@ nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
return 0;
}
-static void
-nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx,
- struct nfp_net_ring_set *tx,
- unsigned int stack_tx_rings, unsigned int num_vecs)
-{
- nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
- nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
- nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
- nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
- nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
- nn->num_stack_tx_rings = stack_tx_rings;
- nn->num_r_vecs = num_vecs;
- *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
-
- if (!netif_is_rxfh_configured(nn->netdev))
- nfp_net_rss_init_itbl(nn);
-}
-
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- unsigned int stack_tx_rings, num_vecs, r;
- int err;
+ int r, err;
+
+ dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
- stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
- if (*xdp_prog)
- stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+ dp->num_stack_tx_rings = dp->num_tx_rings;
+ if (dp->xdp_prog)
+ dp->num_stack_tx_rings -= dp->num_rx_rings;
- num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
+ dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
- err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+ err = nfp_net_check_config(nn, dp);
if (err)
- return err;
+ goto exit_free_dp;
- if (!netif_running(nn->netdev)) {
- nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
- stack_tx_rings, num_vecs);
- return 0;
+ if (!netif_running(dp->netdev)) {
+ nfp_net_dp_swap(nn, dp);
+ err = 0;
+ goto exit_free_dp;
}
/* Prepare new rings */
- for (r = nn->num_r_vecs; r < num_vecs; r++) {
+ for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err) {
- num_vecs = r;
+ dp->num_r_vecs = r;
goto err_cleanup_vecs;
}
}
- if (rx) {
- if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
- err = -ENOMEM;
- goto err_cleanup_vecs;
- }
- }
- if (tx) {
- if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
- err = -ENOMEM;
- goto err_free_rx;
- }
- }
+
+ err = nfp_net_rx_rings_prepare(nn, dp);
+ if (err)
+ goto err_cleanup_vecs;
+
+ err = nfp_net_tx_rings_prepare(nn, dp);
+ if (err)
+ goto err_free_rx;
/* Stop device, swap in new rings, try to start the firmware */
nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn);
- err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
- xdp_prog, rx, tx);
+ err = nfp_net_dp_swap_enable(nn, dp);
if (err) {
int err2;
nfp_net_clear_config_and_disable(nn);
/* Try with old configuration and old rings */
- err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
- xdp_prog, rx, tx);
+ err2 = nfp_net_dp_swap_enable(nn, dp);
if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
err, err2);
}
- for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- if (rx)
- nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
- if (tx)
- nfp_net_tx_ring_set_free(nn, tx);
+ nfp_net_rx_rings_free(dp);
+ nfp_net_tx_rings_free(dp);
nfp_net_open_stack(nn);
+exit_free_dp:
+ kfree(dp);
return err;
err_free_rx:
- if (rx)
- nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+ nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
- for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ kfree(dp);
return err;
}
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = new_mtu,
- .dcnt = nn->rxd_cnt,
- };
+ struct nfp_net_dp *dp;
+
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
+ dp->mtu = new_mtu;
+
+ return nfp_net_ring_reconfig(nn, dp);
}
static void nfp_net_stat64(struct net_device *netdev,
@@ -2652,7 +2594,7 @@ static void nfp_net_stat64(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
u64 data[3];
unsigned int start;
@@ -2699,7 +2641,7 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
return -ENOTSUPP;
if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
- if (!nn->bpf_offload_xdp)
+ if (!nn->dp.bpf_offload_xdp)
return nfp_net_bpf_offload(nn, tc->cls_bpf);
else
return -EBUSY;
@@ -2718,7 +2660,7 @@ static int nfp_net_set_features(struct net_device *netdev,
/* Assume this is not called with features we have not advertised */
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
@@ -2762,7 +2704,7 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
- if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
nn_err(nn, "Cannot disable HW TC offload while in use\n");
return -EBUSY;
}
@@ -2770,16 +2712,16 @@ static int nfp_net_set_features(struct net_device *netdev,
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
- if (new_ctrl == nn->ctrl)
+ if (new_ctrl == nn->dp.ctrl)
return 0;
- nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
+ nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
return err;
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
return 0;
}
@@ -2830,6 +2772,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int
+nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ if (!nn->eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nn->eth_port->is_split)
+ err = snprintf(name, len, "p%d", nn->eth_port->label_port);
+ else
+ err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
+ nn->eth_port->label_subport);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
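
For reference, a sketch of the names the two snprintf() formats above produce, with assumed label values:

	/* Illustrative values only:
	 *   is_split == false, label_port == 0                     -> "p0"
	 *   is_split == true,  label_port == 3, label_subport == 1 -> "p3s1"
	 */
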
+
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -2842,7 +2804,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
nn->vxlan_ports[idx] = port;
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
return;
BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
@@ -2921,8 +2883,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->bpf_offload_xdp)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->dp.bpf_offload_xdp)
return prog ? -EBUSY : 0;
cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
} else {
@@ -2935,48 +2897,47 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
/* Stop offload if replace not possible */
if (ret && cmd.command == TC_CLSBPF_REPLACE)
nfp_net_xdp_offload(nn, NULL);
- nn->bpf_offload_xdp = prog && !ret;
+ nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
}
static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
{
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = nn->txd_cnt,
- };
+ struct bpf_prog *old_prog = nn->dp.xdp_prog;
+ struct nfp_net_dp *dp;
int err;
- if (prog && prog->xdp_adjust_head) {
- nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
- if (!prog && !nn->xdp_prog)
+ if (!prog && !nn->dp.xdp_prog)
return 0;
- if (prog && nn->xdp_prog) {
- prog = xchg(&nn->xdp_prog, prog);
+ if (prog && nn->dp.xdp_prog) {
+ prog = xchg(&nn->dp.xdp_prog, prog);
bpf_prog_put(prog);
- nfp_net_xdp_offload(nn, nn->xdp_prog);
+ nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
return 0;
}
- tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->xdp_prog = prog;
+ dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
+ dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ if (prog)
+ dp->rx_dma_off = XDP_PACKET_HEADROOM -
+ (nn->dp.rx_offset ?: NFP_NET_MAX_PREPEND);
+ else
+ dp->rx_dma_off = 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+ err = nfp_net_ring_reconfig(nn, dp);
if (err)
return err;
- /* @prog got swapped and is now the old one */
- if (prog)
- bpf_prog_put(prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
- nfp_net_xdp_offload(nn, nn->xdp_prog);
+ nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
return 0;
}
@@ -2989,7 +2950,7 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
case XDP_SETUP_PROG:
return nfp_net_xdp_setup(nn, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
+ xdp->prog_attached = !!nn->dp.xdp_prog;
return 0;
default:
return -EINVAL;
@@ -3008,6 +2969,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
@@ -3020,9 +2982,9 @@ static const struct net_device_ops nfp_net_netdev_ops = {
void nfp_net_info(struct nfp_net *nn)
{
nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
- nn->is_vf ? "VF " : "",
- nn->num_tx_rings, nn->max_tx_rings,
- nn->num_rx_rings, nn->max_rx_rings);
+ nn->dp.is_vf ? "VF " : "",
+ nn->dp.num_tx_rings, nn->max_tx_rings,
+ nn->dp.num_rx_rings, nn->max_rx_rings);
nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
@@ -3074,21 +3036,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
nn = netdev_priv(netdev);
- nn->netdev = netdev;
+ nn->dp.netdev = netdev;
+ nn->dp.dev = &pdev->dev;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
- nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
- nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+ nn->dp.num_tx_rings = min_t(unsigned int,
+ max_tx_rings, num_online_cpus());
+ nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
netif_get_num_default_rss_queues());
- nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
- nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
+ nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
+ nn->dp.num_r_vecs = min_t(unsigned int,
+ nn->dp.num_r_vecs, num_online_cpus());
- nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
- nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+ nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
+ nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->rx_filter_lock);
@@ -3108,7 +3073,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
*/
void nfp_net_netdev_free(struct nfp_net *nn)
{
- free_netdev(nn->netdev);
+ free_netdev(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_rss_key_sz() - Get current size of the RSS key
+ * @nn: NFP Net device instance
+ *
+ * Return: size of the RSS key for currently selected hash function.
+ */
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
+{
+ switch (nn->rss_hfunc) {
+ case ETH_RSS_HASH_TOP:
+ return NFP_NET_CFG_RSS_KEY_SZ;
+ case ETH_RSS_HASH_XOR:
+ return 0;
+ case ETH_RSS_HASH_CRC32:
+ return 4;
+ }
+
+ nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
+ return 0;
}
/**
@@ -3117,14 +3103,32 @@ void nfp_net_netdev_free(struct nfp_net *nn)
*/
static void nfp_net_rss_init(struct nfp_net *nn)
{
- netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
+ unsigned long func_bit, rss_cap_hfunc;
+ u32 reg;
+
+ /* Read the RSS function capability and select first supported func */
+ reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
+ rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
+ if (!rss_cap_hfunc)
+ rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
+ NFP_NET_CFG_RSS_TOEPLITZ);
+
+ func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
+ if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
+ dev_warn(nn->dp.dev,
+ "Bad RSS config, defaulting to Toeplitz hash\n");
+ func_bit = ETH_RSS_HASH_TOP_BIT;
+ }
+ nn->rss_hfunc = 1 << func_bit;
+
+ netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
nfp_net_rss_init_itbl(nn);
/* Enable IPv4/IPv6 TCP by default */
nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
NFP_NET_CFG_RSS_IPV6_TCP |
- NFP_NET_CFG_RSS_TOEPLITZ |
+ FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
NFP_NET_CFG_RSS_MASK;
}
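
A worked example of the capability decoding above, using an assumed register value:

	/* Assume NFP_NET_CFG_RSS_CAP reads 0x04000000.  FIELD_GET() with the
	 * 0xff000000 mask yields 0x4, find_first_bit() returns bit 2
	 * (ETH_RSS_HASH_CRC32_BIT), so rss_hfunc becomes ETH_RSS_HASH_CRC32 and
	 * nfp_net_rss_key_sz() reports a 4-byte key; FIELD_PREP() then sets the
	 * NFP_NET_CFG_RSS_CRC32 bit in rss_cfg.
	 */
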
@@ -3151,6 +3155,17 @@ int nfp_net_netdev_init(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int err;
+ /* XDP calls for 256 byte packet headroom which wouldn't fit in a u8.
+ * We, however, reuse the metadata prepend space for XDP buffers which
+ * is at least 1 byte long and as long as XDP headroom doesn't increase
+ * above 256 the *extra* XDP headroom will fit on 8 bits.
+ */
+ BUILD_BUG_ON(XDP_PACKET_HEADROOM > 256);
+
+ nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3158,17 +3173,26 @@ int nfp_net_netdev_init(struct net_device *netdev)
nfp_net_write_mac_addr(nn);
/* Determine RX packet/metadata boundary offset */
- if (nn->fw_ver.major >= 2)
- nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
- else
- nn->rx_offset = NFP_NET_RX_OFFSET;
+ if (nn->fw_ver.major >= 2) {
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+ if (reg > NFP_NET_MAX_PREPEND) {
+ nn_err(nn, "Invalid rx offset: %d\n", reg);
+ return -EINVAL;
+ }
+ nn->dp.rx_offset = reg;
+ } else {
+ nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+ }
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
netdev->mtu = nn->max_mtu;
else
netdev->mtu = NFP_NET_DEFAULT_MTU;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);
+ nn->dp.mtu = netdev->mtu;
+ nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
/* Advertise/enable offloads based on capabilities
*
@@ -3179,31 +3203,31 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->hw_features = NETIF_F_HIGHDMA;
if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
netdev->hw_features |= NETIF_F_RXCSUM;
- nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
}
if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
netdev->hw_features |= NETIF_F_SG;
- nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
}
if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
}
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
netdev->hw_features |= NETIF_F_RXHASH;
nfp_net_rss_init(nn);
- nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
}
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO)
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL;
- nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
netdev->hw_enc_features = netdev->hw_features;
}
@@ -3212,11 +3236,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
}
netdev->features = netdev->hw_features;
@@ -3229,14 +3253,14 @@ int nfp_net_netdev_init(struct net_device *netdev)
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
- nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
- nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* Allow IRQ moderation, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
nfp_net_irqmod_init(nn);
- nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
/* Stash the re-configuration queue away. First odd queue in TX Bar */
@@ -3275,9 +3299,9 @@ void nfp_net_netdev_clean(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- if (nn->bpf_offload_xdp)
+ if (nn->dp.xdp_prog)
+ bpf_prog_put(nn->dp.xdp_prog);
+ if (nn->dp.bpf_offload_xdp)
nfp_net_xdp_offload(nn, NULL);
- unregister_netdev(nn->netdev);
+ unregister_netdev(nn->dp.netdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 385ba355c965..71d86171b4ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -192,6 +192,14 @@
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
/**
+ * RSS capabilities
+ * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * @NFP_NET_CFG_RSS_HFUNC)
+ */
+#define NFP_NET_CFG_RSS_CAP 0x0054
+#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
+
+/**
* VXLAN/UDP encap configuration
* @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
@@ -249,7 +257,11 @@
#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
+#define NFP_NET_CFG_RSS_HFUNC 0xff000000
#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
+#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */
+#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */
+#define NFP_NET_CFG_RSS_HFUNCS 3
#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
#define NFP_NET_CFG_RSS_KEY_SZ 0x28
#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 6e9372a18375..74125584260b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
- if (!netif_running(nn->netdev))
+ if (!netif_running(nn->dp.netdev))
goto out;
rxd_cnt = rx_ring->cnt;
@@ -64,8 +64,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
- seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
- rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p,
+ seq_printf(file, "RX[%02d,%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
+ rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
+ rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
+ rx_ring->rd_p, rx_ring->wr_p,
fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);
for (i = 0; i < rxd_cnt; i++) {
@@ -143,7 +145,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
- if (!netif_running(nn->netdev))
+ if (!netif_running(nn->dp.netdev))
goto out;
txd_cnt = tx_ring->cnt;
@@ -151,8 +153,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
- tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
+ tx_ring->idx, tx_ring->qcidx,
+ tx_ring == r_vec->tx_ring ? "" : "xdp",
+ tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
+ tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
for (i = 0; i < txd_cnt; i++) {
txd = &tx_ring->txds[i];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2649f7523c81..ed22a813e579 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,6 +40,7 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -126,9 +127,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
-#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
+#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
-#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
+#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
@@ -179,30 +180,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
- ring->rx_pending = nn->rxd_cnt;
- ring->tx_pending = nn->txd_cnt;
+ ring->rx_pending = nn->dp.rxd_cnt;
+ ring->tx_pending = nn->dp.txd_cnt;
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
- struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = txd_cnt,
- };
+ struct nfp_net_dp *dp;
+
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- if (nn->rxd_cnt != rxd_cnt)
- reconfig_rx = &rx;
- if (nn->txd_cnt != txd_cnt)
- reconfig_tx = &tx;
+ dp->rxd_cnt = rxd_cnt;
+ dp->txd_cnt = txd_cnt;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
- reconfig_rx, reconfig_tx);
+ return nfp_net_ring_reconfig(nn, dp);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -223,11 +216,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
return -EINVAL;
- if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+ if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
return 0;
nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
- nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+ nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
@@ -245,7 +238,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < nn->num_r_vecs; i++) {
+ for (i = 0; i < nn->dp.num_r_vecs; i++) {
sprintf(p, "rvec_%u_rx_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rvec_%u_tx_pkts", i);
@@ -267,13 +260,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
p += ETH_GSTRING_LEN;
strncpy(p, "tx_lso", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
- for (i = 0; i < nn->num_tx_rings; i++) {
+ for (i = 0; i < nn->dp.num_tx_rings; i++) {
sprintf(p, "txq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "txq_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < nn->num_rx_rings; i++) {
+ for (i = 0; i < nn->dp.num_rx_rings; i++) {
sprintf(p, "rxq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rxq_%u_bytes", i);
@@ -306,12 +299,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
break;
case NFP_NET_DEV_ET_STATS:
- io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
+ io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
data[i] = readq(io_p);
break;
}
}
- for (j = 0; j < nn->num_r_vecs; j++) {
+ for (j = 0; j < nn->dp.num_r_vecs; j++) {
unsigned int start;
do {
@@ -337,16 +330,16 @@ static void nfp_net_get_stats(struct net_device *netdev,
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
data[i++] = gathered_stats[j];
- for (j = 0; j < nn->num_tx_rings; j++) {
- io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
+ for (j = 0; j < nn->dp.num_tx_rings; j++) {
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
data[i++] = readq(io_p);
- io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
- for (j = 0; j < nn->num_rx_rings; j++) {
- io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
+ for (j = 0; j < nn->dp.num_rx_rings; j++) {
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
data[i++] = readq(io_p);
- io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
}
@@ -410,7 +403,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = nn->num_rx_rings;
+ cmd->data = nn->dp.num_rx_rings;
return 0;
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -454,13 +447,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return -EINVAL;
}
- new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
+ new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
if (new_rss_cfg == nn->rss_cfg)
return 0;
- writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
+ writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
if (err)
return err;
@@ -496,7 +489,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
- return NFP_NET_CFG_RSS_KEY_SZ;
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ return -EOPNOTSUPP;
+
+ return nfp_net_rss_key_sz(nn);
}
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -512,9 +510,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
indir[i] = nn->rss_itbl[i];
if (key)
- memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
+ if (hfunc) {
+ *hfunc = nn->rss_hfunc;
+ if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ }
return 0;
}
@@ -527,14 +528,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
int i;
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
- !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
+ !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
if (!key && !indir)
return 0;
if (key) {
- memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
+ memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
nfp_net_rss_write_key(nn);
}
if (indir) {
@@ -564,7 +565,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
- regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
+ regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}
static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -736,16 +737,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int num_tx_rings;
- num_tx_rings = nn->num_tx_rings;
- if (nn->xdp_prog)
- num_tx_rings -= nn->num_rx_rings;
+ num_tx_rings = nn->dp.num_tx_rings;
+ if (nn->dp.xdp_prog)
+ num_tx_rings -= nn->dp.num_rx_rings;
channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
channel->max_combined = min(channel->max_rx, channel->max_tx);
channel->max_other = NFP_NET_NON_Q_VECTORS;
- channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
- channel->rx_count = nn->num_rx_rings - channel->combined_count;
+ channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
+ channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
channel->tx_count = num_tx_rings - channel->combined_count;
channel->other_count = NFP_NET_NON_Q_VECTORS;
}
@@ -753,29 +754,19 @@ static void nfp_net_get_channels(struct net_device *netdev,
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
unsigned int total_tx)
{
- struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
- struct nfp_net_ring_set rx = {
- .n_rings = total_rx,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = total_tx,
- .dcnt = nn->txd_cnt,
- };
+ struct nfp_net_dp *dp;
- if (nn->num_rx_rings != total_rx)
- reconfig_rx = &rx;
- if (nn->num_stack_tx_rings != total_tx ||
- (nn->xdp_prog && reconfig_rx))
- reconfig_tx = &tx;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
- if (nn->xdp_prog)
- tx.n_rings += total_rx;
+ dp->num_rx_rings = total_rx;
+ dp->num_tx_rings = total_tx;
+ /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
+ if (dp->xdp_prog)
+ dp->num_tx_rings += total_rx;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
- reconfig_rx, reconfig_tx);
+ return nfp_net_ring_reconfig(nn, dp);
}
static int nfp_net_set_channels(struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3afcdc11480c..2025cb7c6d90 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -130,7 +130,7 @@ err_area:
}
static void
-nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
+nfp_net_get_mac_addr_hwinfo(struct nfp_net_dp *dp, struct nfp_cpp *cpp,
unsigned int id)
{
u8 mac_addr[ETH_ALEN];
@@ -141,23 +141,22 @@ nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
mac_str = nfp_hwinfo_lookup(cpp, name);
if (!mac_str) {
- dev_warn(&nn->pdev->dev,
- "Can't lookup MAC address. Generate\n");
- eth_hw_addr_random(nn->netdev);
+ dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
+ eth_hw_addr_random(dp->netdev);
return;
}
if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
- dev_warn(&nn->pdev->dev,
+ dev_warn(dp->dev,
"Can't parse MAC address (%s). Generate.\n", mac_str);
- eth_hw_addr_random(nn->netdev);
+ eth_hw_addr_random(dp->netdev);
return;
}
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ ether_addr_copy(dp->netdev->dev_addr, mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}
/**
@@ -178,12 +177,14 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
if (pf->eth_tbl->ports[i].eth_index == id) {
const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ nn->eth_port = &pf->eth_tbl->ports[i];
+
+ ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
return;
}
- nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+ nfp_net_get_mac_addr_hwinfo(&nn->dp, pf->cpp, id);
}
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -305,10 +306,10 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
nn->cpp = pf->cpp;
nn->fw_ver = *fw_ver;
- nn->ctrl_bar = ctrl_bar;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
nn->rx_bar = rx_bar;
- nn->is_vf = 0;
+ nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
@@ -330,7 +331,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->netdev);
+ err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
return err;
@@ -399,7 +400,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
/* Get MSI-X vectors */
wanted_irqs = 0;
list_for_each_entry(nn, &pf->ports, port_list)
- wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+ wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
if (!pf->irq_entries) {
@@ -444,7 +445,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
err_prev_deinit:
list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_irqs_disable(pf->pdev);
err_vec_free:
@@ -570,7 +571,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
list_for_each_entry(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_pf_free_netdevs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
index 18a851eb3508..b5b6f69d1e0f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
spin_lock_bh(&nn->rx_filter_lock);
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
mod_timer(&nn->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
@@ -132,7 +132,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return NN_ACT_TC_DROP;
if (is_tcf_mirred_egress_redirect(a) &&
- tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
return NN_ACT_TC_REDIR;
}
@@ -160,7 +160,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
act = ret;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (max_mtu < nn->netdev->mtu) {
+ if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
return -ENOTSUPP;
}
@@ -168,8 +168,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
- *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
- GFP_KERNEL);
+ *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
return -ENOMEM;
@@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return 0;
out:
- dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
return ret;
}
@@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
u64 bpf_addr = dma_addr;
int err;
- nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+ nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
if (dense_mode)
bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
nn_err(nn, "FW command error while loading BPF: %d\n", err);
/* Enable passing packets through BPF function */
- nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
- nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
- dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+ dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
spin_lock_bh(&nn->rx_filter_lock);
- nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
spin_unlock_bh(&nn->rx_filter_lock);
- nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
del_timer_sync(&nn->rx_filter_stats_timer);
- nn->bpf_offload_skip_sw = 0;
+ nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
@@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
* frames which didn't have BPF applied in the hardware should
* be fine if software fallback is available, though.
*/
- if (nn->bpf_offload_skip_sw)
+ if (nn->dp.bpf_offload_skip_sw)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return 0;
case TC_CLSBPF_ADD:
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 39407f7cc586..86e61be6f35c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
if (!is_valid_ether_addr(mac_addr)) {
- eth_hw_addr_random(nn->netdev);
+ eth_hw_addr_random(nn->dp.netdev);
return;
}
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->ctrl_bar = ctrl_bar;
- nn->is_vf = 1;
+ nn->dp.ctrl_bar = ctrl_bar;
+ nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
NFP_NET_MIN_PORT_IRQS,
- NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+ NFP_NET_NON_Q_VECTORS +
+ nn->dp.num_r_vecs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->netdev);
+ err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
goto err_irqs_disable;
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
nfp_net_irqs_disable(pdev);
@@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
} else {
iounmap(vf->q_bar);
}
- iounmap(nn->ctrl_bar);
+ iounmap(nn->dp.ctrl_bar);
nfp_net_netdev_free(nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 1ece1f8ae4b3..38bd80077e33 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -134,9 +134,32 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
- snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
- FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
- FIELD_GET(NSP_ETH_PORT_LABEL, port));
+ dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+ dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+}
+
+static void
+nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < table->count; i++)
+ for (j = 0; j < table->count; j++) {
+ if (i == j)
+ continue;
+ if (table->ports[i].label_port !=
+ table->ports[j].label_port)
+ continue;
+ if (table->ports[i].label_subport ==
+ table->ports[j].label_subport)
+ nfp_warn(cpp,
+ "Port %d subport %d is a duplicate\n",
+ table->ports[i].label_port,
+ table->ports[i].label_subport);
+
+ table->ports[i].is_split = true;
+ break;
+ }
}
/**
@@ -168,8 +191,7 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
{
struct eth_table_entry *entries;
struct nfp_eth_table *table;
- unsigned int cnt;
- int i, j, ret;
+ int i, j, ret, cnt = 0;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
@@ -178,24 +200,27 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
- kfree(entries);
- return NULL;
+ goto err;
}
- /* Some versions of flash will give us 0 instead of port count */
- cnt = ret;
- if (!cnt) {
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
- if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
- cnt++;
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+
+ /* Some versions of flash will give us 0 instead of port count.
+ * For those that give a port count, verify it against the value
+ * calculated above.
+ */
+ if (ret && ret != cnt) {
+ nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",
+ ret, cnt);
+ goto err;
}
table = kzalloc(sizeof(*table) +
sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
- if (!table) {
- kfree(entries);
- return NULL;
- }
+ if (!table)
+ goto err;
table->count = cnt;
for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
@@ -203,9 +228,15 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
nfp_eth_port_translate(&entries[i], i,
&table->ports[j++]);
+ nfp_eth_mark_split_ports(cpp, table);
+
kfree(entries);
return table;
+
+err:
+ kfree(entries);
+ return NULL;
}
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
index edf703d319c8..325e841ca90a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
@@ -49,10 +49,13 @@
* @lanes: number of channels
* @speed: interface speed (in Mbps)
* @mac_addr: interface MAC address
- * @label: interface id string
+ * @label_port: port id
+ * @label_subport: id of interface within port (for split ports)
* @enabled: is enabled?
* @tx_enabled: is TX enabled?
* @rx_enabled: is RX enabled?
+ *
+ * @is_split: is interface part of a split port
*/
struct nfp_eth_table {
unsigned int count;
@@ -65,14 +68,22 @@ struct nfp_eth_table {
unsigned int speed;
u8 mac_addr[ETH_ALEN];
- char label[8];
+
+ u8 label_port;
+ u8 label_subport;
bool enabled;
bool tx_enabled;
bool rx_enabled;
+
+ /* Computed fields */
+ bool is_split;
} ports[0];
};
+struct nfp_cpp;
+struct nfp_nsp;
+
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
struct nfp_eth_table *
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa6545b..ca30a27df035 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,7 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+#define DRV_MODULE_VERSION "8.10.10.21"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -219,7 +219,9 @@ enum QED_PORT_MODE {
QED_PORT_MODE_DE_4X20G,
QED_PORT_MODE_DE_1X40G,
QED_PORT_MODE_DE_2X25G,
- QED_PORT_MODE_DE_1X25G
+ QED_PORT_MODE_DE_1X25G,
+ QED_PORT_MODE_DE_4X25G,
+ QED_PORT_MODE_DE_2X10G,
};
enum qed_dev_cap {
@@ -364,7 +366,8 @@ struct qed_hwfn {
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
-#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+ (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
@@ -523,9 +526,7 @@ struct qed_dev {
u8 dp_level;
char name[NAME_SIZE];
- u8 type;
-#define QED_DEV_TYPE_BB (0 << 0)
-#define QED_DEV_TYPE_AH BIT(0)
+ enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
@@ -540,6 +541,9 @@ struct qed_dev {
u16 vendor_id;
u16 device_id;
+#define QED_DEV_ID_MASK 0xff00
+#define QED_DEV_ID_MASK_BB 0x1600
+#define QED_DEV_ID_MASK_AH 0x8000
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -654,10 +658,16 @@ struct qed_dev {
u32 rdma_max_srq_sge;
};
-#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -694,6 +704,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+int qed_device_num_engines(struct qed_dev *cdev);
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7e3a6fed3da6..9ff62cc5723d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1127,7 +1126,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
- /* default ILT page size for all clients is 32K */
+ /* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca57f96..483241b4b05d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@
/* Chip IDs enum */
enum chip_ids {
- CHIP_RESERVED,
CHIP_BB_B0,
CHIP_K2,
MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
MEM_GROUP_BTB_RAM,
MEM_GROUP_RDIF_CTX,
MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
"BTB_RAM",
"RDIF_CTX",
"TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
u32 size_in_dwords;
};
+struct chip_platform_defs {
+ u8 num_ports;
+ u8 num_pfs;
+ u8 num_vfs;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
- struct {
- u8 num_ports;
- u8 num_pfs;
- } per_platform[MAX_PLATFORM_IDS];
+ struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
/* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
{ "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
- { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } },
+ { "k2",
+ { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } }
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
4, TCM_REG_SM_TASK_CTX},
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
7, MCM_REG_SM_TASK_CTX},
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
3, UCM_REG_SM_TASK_CTX},
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
0, 0},
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
12, YCM_REG_SM_TASK_CTX},
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
/* Block definitions array */
static struct block_defs block_grc_defs = {
- "grc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "grc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
- "pglue_b", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ "pglue_b",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
};
static struct block_defs block_cnig_defs = {
- "cnig", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "cnig",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
- "ncsi", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "ncsi",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
- "bmb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ "bmb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
};
static struct block_defs block_pcie_defs = {
- "pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
- "mcp2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "mcp2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
};
static struct block_defs block_pswhst_defs = {
- "pswhst", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
};
static struct block_defs block_pswhst2_defs = {
- "pswhst2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
};
static struct block_defs block_pswrd_defs = {
- "pswrd", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
};
static struct block_defs block_pswrd2_defs = {
- "pswrd2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
};
static struct block_defs block_pswwr_defs = {
- "pswwr", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswwr",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
- "pswrq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
};
static struct block_defs block_pswrq2_defs = {
- "pswrq2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
};
static struct block_defs block_pglcs_defs = {
- "pglcs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pglcs",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
};
static struct block_defs block_ptu_defs = {
- "ptu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "ptu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
};
static struct block_defs block_dmae_defs = {
- "dmae", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "dmae",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
};
static struct block_defs block_tcm_defs = {
- "tcm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tcm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
};
static struct block_defs block_mcm_defs = {
- "mcm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "mcm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
};
static struct block_defs block_ucm_defs = {
- "ucm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "ucm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
};
static struct block_defs block_xcm_defs = {
- "xcm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xcm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
};
static struct block_defs block_ycm_defs = {
- "ycm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ycm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
};
static struct block_defs block_pcm_defs = {
- "pcm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "pcm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
};
static struct block_defs block_qm_defs = {
- "qm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ "qm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
};
static struct block_defs block_tm_defs = {
- "tm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
};
static struct block_defs block_dorq_defs = {
- "dorq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "dorq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
};
static struct block_defs block_brb_defs = {
- "brb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "brb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
};
static struct block_defs block_src_defs = {
- "src", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "src",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
};
static struct block_defs block_prs_defs = {
- "prs", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "prs",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
};
static struct block_defs block_tsdm_defs = {
- "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsdm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
};
static struct block_defs block_msdm_defs = {
- "msdm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msdm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
};
static struct block_defs block_usdm_defs = {
- "usdm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usdm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
};
static struct block_defs block_xsdm_defs = {
- "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsdm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
};
static struct block_defs block_ysdm_defs = {
- "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysdm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
};
static struct block_defs block_psdm_defs = {
- "psdm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psdm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
};
static struct block_defs block_tsem_defs = {
- "tsem", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsem",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
};
static struct block_defs block_msem_defs = {
- "msem", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msem",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
};
static struct block_defs block_usem_defs = {
- "usem", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usem",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
};
static struct block_defs block_xsem_defs = {
- "xsem", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsem",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
};
static struct block_defs block_ysem_defs = {
- "ysem", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysem",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
};
static struct block_defs block_psem_defs = {
- "psem", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psem",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
};
static struct block_defs block_rss_defs = {
- "rss", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "rss",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
};
static struct block_defs block_tmld_defs = {
- "tmld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "tmld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
};
static struct block_defs block_muld_defs = {
- "muld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "muld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
};
static struct block_defs block_yuld_defs = {
- "yuld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "yuld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
};
static struct block_defs block_xyld_defs = {
- "xyld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xyld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
};
static struct block_defs block_prm_defs = {
- "prm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "prm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
};
static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb1",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
};
static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
};
static struct block_defs block_rpb_defs = {
- "rpb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rpb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
};
static struct block_defs block_btb_defs = {
- "btb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ "btb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
};
static struct block_defs block_pbf_defs = {
- "pbf", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
};
static struct block_defs block_rdif_defs = {
- "rdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
};
static struct block_defs block_tdif_defs = {
- "tdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
};
static struct block_defs block_cdu_defs = {
- "cdu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "cdu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
};
static struct block_defs block_ccfc_defs = {
- "ccfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "ccfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
};
static struct block_defs block_tcfc_defs = {
- "tcfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "tcfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
};
static struct block_defs block_igu_defs = {
- "igu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "igu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
};
static struct block_defs block_cau_defs = {
- "cau", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "cau",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
};
static struct block_defs block_umac_defs = {
- "umac", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "umac",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
- "nig", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "nig",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
};
static struct block_defs block_wol_defs = {
- "wol", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "wol",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
};
static struct block_defs block_bmbn_defs = {
- "bmbn", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ "bmbn",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
- "nwm", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "nwm",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
};
static struct block_defs block_nws_defs = {
- "nws", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "nws",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+ NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+ NWS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
- "ms", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "ms",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+ MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+ MS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
- "phy_pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "phy_pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
};
static struct block_defs block_led_defs = {
- "led", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+ "avs_wrap", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+ "ptld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_ms_defs,
&block_phy_pcie_defs,
&block_led_defs,
+ &block_avs_wrap_defs,
+ &block_rgfs_defs,
+ &block_tgfs_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
};
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
- {256, 256, 320},
- {32, 32, 32} },
+ {256, 320},
+ {32, 32} },
{ "rss_mem_key_msb", "rss_key", 1024,
- {128, 128, 208},
- {256, 256, 256} },
+ {128, 208},
+ {256, 256} },
{ "rss_mem_key_lsb", "rss_key", 2048,
- {128, 128, 208},
- {64, 64, 64} },
+ {128, 208},
+ {64, 64} },
{ "rss_mem_info", "rss_info", 3072,
- {128, 128, 208},
- {16, 16, 16} },
+ {128, 208},
+ {16, 16} },
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 128), (128 * 208)},
- {16, 16, 16} }
+ {(128 * 128), (128 * 208)},
+ {16, 16} }
};
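/*
 * Illustrative sketch (not part of the patch): how the per-chip entry
 * counts and widths in s_rss_mem_defs above translate into dump size.
 * qed_grc_dump_rss() computes total dwords as (num_entries * entry_width)
 * / 32 and treats 16-bit entries as packed; the numbers below are taken
 * from the table, everything else is a standalone example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rss_dump_dwords(uint32_t num_entries, uint32_t entry_width)
{
	/* entry_width is in bits, so 32 bits of entries fill one dword */
	return (num_entries * entry_width) / 32;
}

int main(void)
{
	/* "rss_mem_cid", second (K2) column: 320 entries of 32 bits */
	printf("rss_mem_cid: %u dwords\n", rss_dump_dwords(320, 32));
	/* "rss_mem_info", K2 column: 208 entries of 16 bits -> packed */
	printf("rss_mem_info: %u dwords (packed)\n", rss_dump_dwords(208, 16));
	return 0;
}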
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 4800, 5632} },
+ {4800, 5632} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 2880, 3680} },
+ {2880, 3680} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152, 1152} }
+ {1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
};
static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
return dword;
}
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (!dev_data->grc.params_initialized) {
+ qed_dbg_grc_set_params_default(p_hwfn);
+ dev_data->grc.params_initialized = 1;
+ }
+}
+
/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
@@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_BB_B0;
- dev_data->mode_enable[MODE_BB_B0] = 1;
+ dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
}
dev_data->platform_id = PLATFORM_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
+
+ /* Initializes the GRC parameters */
+ qed_dbg_grc_init_params(p_hwfn);
+
dev_data->initialized = true;
return DBG_STATUS_OK;
}
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
int printed_chars;
u32 offset = 0;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
/* Read FW image/version from PRAM in a non-reset SEMI */
bool found = false;
u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
bool dump,
u8 num_specific_global_params)
{
+ u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
/* Find platform string and dump global params section header */
offset += qed_dump_section_hdr(dump_buf + offset,
- dump,
- "global_params",
- NUM_COMMON_GLOBAL_PARAMS +
- num_specific_global_params);
+ dump, "global_params", num_params);
/* Store params */
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
}
}
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
- return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- if (!dev_data->grc.param_set_by_user[i])
- dev_data->grc.param_val[i] =
- s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
/* Returns true if the specified entity (indicated by GRC param) should be
* included in the dump, false otherwise.
*/
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
}
}
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
{
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
* The following parameters are dumped:
* - 'count' = num_dumped_entries
* - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
* - 'param_name' = param_val (user param, dumped only if param_name != NULL and
* param_val != NULL)
*/
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+ if (dump)
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ else
+ offset += len;
+ return offset;
+}
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+ u32 len)
+{
+ if (dump)
+ *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+ return 1;
+}
+
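/*
 * Illustrative sketch (not part of the patch): the sequence header written
 * by qed_grc_dump_reg_entry_hdr() packs the dword-granular address and the
 * length into one dword. The real REG_DUMP_LEN_SHIFT value comes from the
 * driver headers; 24 below is an assumption used only for this example.
 */
#include <stdint.h>

#define EXAMPLE_REG_DUMP_LEN_SHIFT 24	/* assumed, not the driver constant */

static uint32_t example_pack_reg_hdr(uint32_t addr, uint32_t len)
{
	return addr | (len << EXAMPLE_REG_DUMP_LEN_SHIFT);
}

static void example_unpack_reg_hdr(uint32_t hdr, uint32_t *addr, uint32_t *len)
{
	*len = hdr >> EXAMPLE_REG_DUMP_LEN_SHIFT;
	*addr = hdr & ((1u << EXAMPLE_REG_DUMP_LEN_SHIFT) - 1);
}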
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf,
bool dump, u32 addr, u32 len)
{
- u32 offset = 0, i;
+ u32 offset = 0;
+
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
+ return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 total_len,
+ u32 read_len, u32 skip_len)
+{
+ u32 offset = 0, reg_offset = 0;
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
if (dump) {
- *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
- for (i = 0; i < len; i++, addr++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- DWORDS_TO_BYTES(addr));
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32,
+ read_len,
+ total_len - reg_offset);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len);
+ reg_offset += curr_len;
+ addr += curr_len;
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32,
+ skip_len,
+ total_len - skip_len);
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
+ reg_offset += curr_len;
+ addr += curr_len;
+ }
+ }
} else {
- offset += len + 1;
+ offset += total_len;
}
return offset;
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
&input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ addr = GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset, dump,
- GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS),
- GET_FIELD(reg->data,
- DBG_DUMP_REG_LENGTH));
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
(*num_dumped_reg_entries)++;
}
} else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct chip_platform_defs *p_platform_defs;
u32 offset = 0, input_offset = 0;
- u8 port_id, pf_id;
+ struct chip_defs *p_chip_defs;
+ u8 port_id, pf_id, vf_id;
+ u16 fid;
+
+ p_chip_defs = &s_chip_defs[dev_data->chip_id];
+ p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
switch (split_type_id) {
case SPLIT_TYPE_NONE:
- case SPLIT_TYPE_VF:
offset += qed_grc_dump_split_data(p_hwfn,
p_ptt,
curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
break;
case SPLIT_TYPE_PORT:
- for (port_id = 0;
- port_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_ports;
+ for (port_id = 0; port_id < p_platform_defs->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
break;
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0;
- pf_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_pfs;
+ for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
pf_id++) {
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, pf_id);
- offset += qed_grc_dump_split_data(p_hwfn,
- p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump, block_enable,
- "pf", pf_id, param_name,
- param_val);
+ u8 pfid_shift =
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+ if (dump) {
+ fid = pf_id << pfid_shift;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_VF:
+ for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ vf_id++) {
+ u8 vfvalid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+ u8 vfid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+ if (dump) {
+ fid = BIT(vfvalid_shift) |
+ (vf_id << vfid_shift);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "vf", vf_id,
+ param_name,
+ param_val);
}
break;
default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
}
/* Pretend to original PF */
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ if (dump) {
+ fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
return offset;
}
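/*
 * Illustrative sketch (not part of the patch): how the PF and VF branches
 * above compose the concrete FID handed to qed_fid_pretend(). The shift
 * values are example assumptions; the driver takes them from the
 * PXP_PRETEND_CONCRETE_FID_* definitions.
 */
#include <stdint.h>

#define EX_PFID_SHIFT		0	/* assumed */
#define EX_VFID_SHIFT		0	/* assumed */
#define EX_VFVALID_SHIFT	8	/* assumed */

static uint16_t ex_pf_concrete_fid(uint8_t pf_id)
{
	return (uint16_t)(pf_id << EX_PFID_SHIFT);
}

static uint16_t ex_vf_concrete_fid(uint8_t vf_id)
{
	/* the vfvalid bit marks the FID as referring to a VF */
	return (uint16_t)((1u << EX_VFVALID_SHIFT) | (vf_id << EX_VFID_SHIFT));
}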
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs
- [i].addr), 1);
+ addr,
+ 1);
num_regs++;
}
}
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
&attn_reg_arr[reg_idx];
u16 modes_buf_offset;
bool eval_mode;
+ u32 addr;
/* Check mode */
eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
/* Mode match - read and dump registers */
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- reg_data->mask_address,
- 1);
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS),
- 1);
+ addr = reg_data->mask_address;
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries += 2;
}
}
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Write storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 addr;
+
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
dump)
continue;
+ addr =
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- BYTES_TO_DWORDS(s_storm_defs[storm_id].
- sem_fast_mem_addr +
- SEM_FAST_REG_STALLED),
- 1);
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries++;
}
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, addr;
+
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ dump, 2, "eng", -1, NULL, NULL);
+
+	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register should be
+	 * skipped).
+ */
+ addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+ offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+ addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+ offset +=
+ qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+
+ return offset;
+}
+
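/*
 * Illustrative sketch (not part of the patch): the read/skip cycle used for
 * the R/TDIF debug-info ranges above - read read_len dwords, zero skip_len
 * dwords, repeat until total_len dwords have been produced (7/1 in the
 * caller). example_reg_read() is a stand-in, not a driver function.
 */
#include <stdint.h>
#include <string.h>

static uint32_t example_reg_read(uint32_t addr)
{
	return addr;	/* placeholder for the GRC read */
}

static void example_dump_with_skip(uint32_t *out, uint32_t addr,
				   uint32_t total_len, uint32_t read_len,
				   uint32_t skip_len)
{
	uint32_t done = 0;

	while (done < total_len) {
		uint32_t i, chunk = total_len - done;

		if (chunk > read_len)
			chunk = read_len;
		for (i = 0; i < chunk; i++)
			out[done + i] = example_reg_read(addr + done + i);
		done += chunk;

		if (done < total_len) {
			chunk = total_len - done;
			if (chunk > skip_len)
				chunk = skip_len;
			/* skipped registers show up as zeros in the dump */
			memset(&out[done], 0, chunk * sizeof(uint32_t));
			done += chunk;
		}
	}
}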
/* Dumps a GRC memory header (section and params).
* The following parameters are dumped:
* name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
* width - bit_width is dumped if it's not zero.
* packed - packed=1 is dumped if it's not false.
* mem_group - mem_group is always dumped.
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 offset = 0;
char buf[64];
- if (!dword_len)
+ if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from %s...\n",
- dword_len, buf);
+ len, buf);
} else {
/* Dump address */
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr", byte_addr);
- if (dump && dword_len > 64)
+ dump, "addr",
+ DWORDS_TO_BYTES(addr));
+ if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- dword_len, byte_addr);
+ len, (u32)DWORDS_TO_BYTES(addr));
}
/* Dump len */
- offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
/* Dump bit width */
if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
name,
- byte_addr,
- dword_len,
+ addr,
+ len,
bit_width,
packed,
mem_group, is_storm, storm_letter);
- if (dump) {
- u32 i;
-
- for (i = 0; i < dword_len;
- i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- } else {
- offset += dword_len;
- }
-
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
return offset;
}
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
if (qed_grc_is_mem_included(p_hwfn,
(enum block_id)cond_hdr->block_id,
mem_group_id)) {
- u32 mem_byte_addr =
- DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS));
+ u32 mem_addr = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS);
u32 mem_len = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_LENGTH);
+ enum dbg_grc_params grc_param;
char storm_letter = 'a';
bool is_storm = false;
/* Update memory length for CCFC/TCFC memories
* according to number of LCIDs/LTIDs.
*/
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS)
- * (mem_len / MAX_LCIDS);
- else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id ==
+ MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS)
- * (mem_len / MAX_LTIDS);
+ grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
/* If memory is associated with Storm, update
* Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn, p_ptt,
dump_buf + offset, dump, NULL,
- mem_byte_addr, mem_len, 0,
+ mem_addr, mem_len, 0,
false,
s_mem_group_names[mem_group_id],
is_storm, storm_letter);
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE +
- DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+ struct storm_defs *storm = &s_storm_defs[storm_id];
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- 32,
- false,
- "ior",
- true,
- s_storm_defs
- [storm_id].letter);
- }
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 dwords, addr;
+
+ dwords = storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE;
+ addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ storm->letter);
}
}
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_size = (num_entries * entry_width) / 32;
+ u32 total_dwords = (num_entries * entry_width) / 32;
+ u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
bool packed = (entry_width == 16);
- u32 addr = rss_defs->addr;
- u32 i, j;
+ u32 rss_addr = rss_defs->addr;
+ u32 i, addr;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
rss_defs->mem_name,
- addr,
- total_size,
+ 0,
+ total_dwords,
entry_width,
packed,
rss_defs->type_name, false, 0);
if (!dump) {
- offset += total_size;
+ offset += total_dwords;
continue;
}
/* Dump RSS data */
- for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
- qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
- for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
- *(dump_buf + offset) =
- qed_rd(p_hwfn, p_ptt,
- RSS_REG_RSS_RAM_DATA +
- DWORDS_TO_BYTES(j));
+ for (i = 0; i < total_dwords;
+ i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ addr,
+ size);
}
}
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 total_blocks, ram_size, offset = 0, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
- u32 ram_size, total_blocks;
- u32 offset = 0, i, j;
+ struct big_ram_defs *big_ram;
- total_blocks =
- s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ big_ram = &s_big_ram_defs[big_ram_id];
+ total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
- strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
- strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(type_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
+ strncpy(mem_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
/* Read and dump Big RAM data */
for (i = 0; i < total_blocks / 2; i++) {
- qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
- i);
- for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
- s_big_ram_defs[big_ram_id].
- data_reg_addr +
- DWORDS_TO_BYTES(j));
+ u32 addr, len;
+
+ qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+ addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+ len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
bool block_enable[MAX_BLOCK_ID] = { 0 };
+ u32 offset = 0, addr;
bool halted = false;
- u32 offset = 0;
/* Halt MCP */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_SCRATCH,
+ BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
0, false, "MCP", false, 0);
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_CPU_REG_FILE,
+ BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
0, false, "MCP", false, 0);
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, "eng", -1, "block", "MCP");
+ addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (MISC_REG_SHARED_MEM_ADDR), 1);
+ addr,
+ 1);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
{
u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id, addr, i;
+ u32 offset = 0, block_id, line_id;
struct block_defs *p_block_defs;
if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
if (dump && !dev_data->block_in_reset[block_id]) {
u8 dbg_client_id =
p_block_defs->dbg_client_id[dev_data->chip_id];
+ u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ u32 len = STATIC_DEBUG_LINE_DWORDS;
/* Enable block's client */
qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
0xf, 0, 0, 0);
/* Read debug line info */
- for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
- i < STATIC_DEBUG_LINE_DWORDS;
- i++, offset++, addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
- addr);
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
/* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
u8 i, port_mode = 0;
u32 offset = 0;
- /* Check if emulation platform */
*num_dumped_dwords = 0;
- /* Fill GRC parameters that were not set by the user with their default
- * value.
- */
- qed_dbg_grc_set_params_default(p_hwfn);
-
/* Find port mode */
if (dump) {
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
}
/* Disable all parities using MFW command */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
if (qed_grc_get_param
(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
- else
- DP_NOTICE(p_hwfn,
- "Failed to mask parities using MFW\n");
}
}
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset,
dump,
block_enable, NULL, NULL);
+
+ /* Dump special registers */
+ offset += qed_grc_dump_special_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
}
/* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
- u32 grc_addr =
- DWORDS_TO_BYTES(GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS));
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
/* Write register header */
struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
memset(reg_hdr, 0, sizeof(*reg_hdr));
reg_hdr->size = reg->size;
SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
/* Write register values */
- for (i = 0; i < reg->size;
- i++, offset++, grc_addr += 4)
- dump_buf[offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
- }
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size);
}
+ }
}
return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
- u32 i, j, offset = 0;
+ u32 i, offset = 0;
u16 entry_id;
u8 reg_id;
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
/* Read current entry of all condition registers */
- if (dump) {
- u32 next_reg_offset = 0;
-
- for (reg_id = 0;
- reg_id < rule->num_cond_regs;
- reg_id++) {
- const struct dbg_idle_chk_cond_reg
- *reg = &cond_regs[reg_id];
-
- /* Find GRC address (if it's a memory,
- * the address of the specific entry is
- * calculated).
- */
- u32 grc_addr =
- DWORDS_TO_BYTES(
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS));
-
- if (reg->num_entries > 1 ||
- reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two
- (reg->entry_size) : 1;
-
- grc_addr +=
- DWORDS_TO_BYTES(
- (reg->start_entry +
- entry_id)
- * padded_entry_size);
- }
+ u32 next_reg_offset = 0;
- /* Read registers */
- if (next_reg_offset + reg->entry_size >=
- IDLE_CHK_MAX_ENTRIES_SIZE) {
- DP_NOTICE(p_hwfn,
- "idle check registers entry is too large\n");
- return 0;
- }
+ for (reg_id = 0; reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg =
+ &cond_regs[reg_id];
- for (j = 0; j < reg->entry_size;
- j++, next_reg_offset++,
- grc_addr += 4)
- cond_reg_values[next_reg_offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
+			/* Find GRC address (if it's a memory, the
+ * address of the specific entry is calculated).
+ */
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
+
+ addr += (reg->start_entry + entry_id) *
+ padded_entry_size;
}
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ next_reg_offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ cond_reg_values +
+ next_reg_offset,
+ dump, addr,
+ reg->entry_size);
}
/* Call rule's condition function - a return value of
* true indicates failure.
*/
if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values) || !dump) {
+ imm_values)) {
offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct mcp_file_att file_att;
/* Call NVRAM get file command */
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type, &ret_mcp_resp, &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att) != 0)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att);
/* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ if (nvm_result ||
+ (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
u32 running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- enum dbg_status status;
u32 nvram_image_type;
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
- status = qed_find_nvram_image(p_hwfn,
- p_ptt,
- nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
-
- return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
- u32 i;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
- size_in_dwords, grc_addr);
- for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
- buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+ return qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
}
/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
- u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
- u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
+ bool mcp_access;
int halted = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
*num_dumped_dwords = 0;
/* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
* consistent if halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump) {
+ if (dump && mcp_access) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "size", trace_data_size_dwords);
/* Read trace data from scratchpad into dump buffer */
- if (dump)
- qed_mcp_trace_read_data(p_hwfn,
- p_ptt,
- trace_data_grc_addr,
- trace_data_size_dwords,
- dump_buf + offset);
- offset += trace_data_size_dwords;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(trace_data_grc_addr),
+ trace_data_size_dwords);
/* Resume MCP (only if halt succeeded) */
if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "mcp_trace_meta", 1);
/* Read trace meta info */
- status = qed_mcp_trace_get_meta_info(p_hwfn,
- p_ptt,
- trace_data_size_bytes,
- &running_bundle_id,
- &trace_meta_offset_bytes,
- &trace_meta_size_bytes);
- if (status != DBG_STATUS_OK)
- return status;
+ if (mcp_access) {
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords =
+ BYTES_TO_DWORDS(trace_meta_size_bytes);
+ }
- /* Dump trace meta size param (trace_meta_size_bytes is always
- * dword-aligned).
- */
- trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- trace_meta_size_dwords);
+ /* Dump trace meta size param */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_meta_size_dwords);
/* Read trace meta image into dump buffer */
- if (dump) {
+ if (dump && trace_meta_size_dwords)
status = qed_mcp_trace_read_meta(p_hwfn,
- p_ptt,
- trace_meta_offset_bytes,
- trace_meta_size_bytes,
- dump_buf + offset);
- if (status != DBG_STATUS_OK)
- return status;
- }
-
- offset += trace_meta_size_dwords;
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status == DBG_STATUS_OK)
+ offset += trace_meta_size_dwords;
*num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ /* If no mcp access, indicate that the dump doesn't contain the meta
+ * data from NVRAM.
+ */
+ return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
/* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct fw_asserts_ram_section *asserts;
char storm_letter_str[2] = "?";
struct fw_info fw_info;
- u32 offset = 0, i;
+ u32 offset = 0;
u8 storm_id;
/* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
- last_list_idx, element_addr;
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ u32 last_list_idx, addr;
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Read FW info for the current Storm */
qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+ asserts = &fw_info.fw_asserts_section;
+
/* Dump FW Asserts section header and params */
storm_letter_str[0] = s_storm_defs[storm_id].letter;
offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
storm_letter_str);
offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- fw_info.fw_asserts_section.
- list_element_dword_size);
+ asserts->list_element_dword_size);
if (!dump) {
- offset += fw_info.fw_asserts_section.
- list_element_dword_size;
+ offset += asserts->list_element_dword_size;
continue;
}
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
fw_asserts_section_addr =
s_storm_defs[storm_id].sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
- RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
- section_ram_line_offset);
+ RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
next_list_idx_addr =
fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_next_index_dword_offset);
+ DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
? next_list_idx
- : fw_info.fw_asserts_section.list_num_elements)
- - 1;
- element_addr =
- fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_dword_offset) +
- last_list_idx *
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_element_dword_size);
- for (i = 0;
- i < fw_info.fw_asserts_section.list_element_dword_size;
- i++, offset++, element_addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ : asserts->list_num_elements) - 1;
+ addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+ asserts->list_dword_offset +
+ last_list_idx * asserts->list_element_dword_size;
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, addr,
+ asserts->list_element_dword_size);
}
/* Dump last section */
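/*
 * Illustrative sketch (not part of the patch): how the FW-asserts code above
 * locates the most recently written list element. All quantities are in
 * dwords; next_list_idx is the write pointer read from the Storm RAM.
 */
#include <stdint.h>

static uint32_t ex_last_assert_addr(uint32_t section_dword_addr,
				    uint32_t list_dword_offset,
				    uint32_t next_list_idx,
				    uint32_t list_num_elements,
				    uint32_t element_dword_size)
{
	/* when the write pointer is at 0, the last element is the final slot */
	uint32_t last_idx = (next_list_idx > 0 ? next_list_idx
					       : list_num_elements) - 1;

	return section_dword_addr + list_dword_offset +
	       last_idx * element_dword_size;
}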
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
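/*
 * Illustrative sketch (not part of the patch): with this change the debug
 * data blob starts directly with a fixed table of per-type buffer headers
 * instead of a leading buffer count. The struct below is a reduced
 * stand-in for bin_buffer_hdr; its field names and the type count are
 * assumptions for the example only.
 */
#include <stdint.h>

#define EX_MAX_BIN_DBG_BUFFER_TYPE 4	/* assumed count for the example */

struct ex_bin_buffer_hdr {
	uint32_t offset;	/* byte offset of the buffer within the blob */
	uint32_t length;	/* buffer length in bytes */
};

static const uint32_t *ex_buffer_ptr(const uint8_t *bin_ptr, unsigned int id)
{
	const struct ex_bin_buffer_hdr *hdrs =
		(const struct ex_bin_buffer_hdr *)bin_ptr;

	/* each buffer type owns a fixed header slot, indexed by its id */
	return (const uint32_t *)(bin_ptr + hdrs[id].offset);
}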
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
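/*
 * Illustrative sketch (not part of the patch): the defaults table carries
 * one column per chip, so reverting to defaults is a per-chip lookup.
 * The two-entry layout mirrors the {BB, K2} columns used in this file;
 * the reduced struct and values below are example stand-ins only.
 */
#include <stdint.h>

#define EX_NUM_CHIPS		2	/* BB, K2 */
#define EX_NUM_GRC_PARAMS	3	/* reduced for the example */

struct ex_grc_param_defs {
	uint32_t default_val[EX_NUM_CHIPS];
};

static const struct ex_grc_param_defs ex_defs[EX_NUM_GRC_PARAMS] = {
	{ {1, 1} }, { {0, 0} }, { {1, 1} },
};

static void ex_set_defaults(uint32_t *param_val, unsigned int chip_id)
{
	unsigned int i;

	for (i = 0; i < EX_NUM_GRC_PARAMS; i++)
		param_val[i] = ex_defs[i].default_val[chip_id];
}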
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
- /* Clear all GRC params */
- qed_dbg_grc_clear_params(p_hwfn);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return status;
}
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
/* Idle Check Dump */
*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return DBG_STATUS_OK;
}
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ /* validate buffer size */
+ status =
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
&needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
+ if (status != DBG_STATUS_OK &&
+ status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Perform dump */
- return qed_mcp_trace_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+ status = qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_reg_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_igu_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_protection_override_dump(p_hwfn,
- p_ptt,
- dump_buf, true, num_dumped_dwords);
+
+ status = qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf,
+ true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
REG_FIFO_ELEMENT_ADDR_FACTOR,
s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
- GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PORT),
s_privilege_strs[GET_FIELD(elements[i].
data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
i, address,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WRITE),
s_protection_strs[GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
*/
rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
&buf_size_dwords);
- if (rc != DBG_STATUS_OK)
+ if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e518f914eab1..8b5df71aa3c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -674,11 +674,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_B0);
+ if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (QED_IS_AH(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+ p_hwfn->cdev->type);
+ return -EINVAL;
+ }
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -693,7 +701,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
p_hwfn->cdev->num_ports_in_engines);
- return;
+ return -EINVAL;
}
switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +727,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
"Configuring function for hw_mode: 0x%08x\n",
p_hwfn->hw_info.hw_mode);
+
+ return 0;
}
/* Init run time data for all PFs on an engine. */
@@ -754,10 +764,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
- u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -814,7 +824,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -1135,7 +1146,9 @@ int qed_hw_init(struct qed_dev *cdev,
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- qed_calc_hw_mode(p_hwfn);
+ rc = qed_calc_hw_mode(p_hwfn);
+ if (rc)
+ return rc;
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
if (rc) {
@@ -1485,10 +1498,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
/* clear indirect access */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+ } else {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1610,6 +1638,7 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
enum qed_resources res_id)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info;
u32 dflt_resc_num = 0;
@@ -1620,17 +1649,22 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
dflt_resc_num = sb_cnt_info.sb_cnt;
break;
case QED_L2_QUEUE:
- dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2
+ : MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case QED_VPORT:
dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
+ dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2
+ : MAX_NUM_VPORTS_BB) / num_funcs;
break;
case QED_RSS_ENG:
- dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2
+ : ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case QED_PQ:
/* The granularity of the PQs is 8 */
- dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
+ dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2
+ : MAX_QM_TX_QUEUES_BB) / num_funcs;
dflt_resc_num &= ~0x7;
break;
case QED_RL:
@@ -1642,7 +1676,8 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2
+ : PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case QED_LL2_QUEUE:
dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
@@ -1653,7 +1688,10 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
+ dflt_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+ : RDMA_NUM_STATISTIC_COUNTERS_BB) /
+ num_funcs;
+
break;
default:
break;
@@ -1780,6 +1818,7 @@ out:
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
u8 res_id;
int rc;
@@ -1790,7 +1829,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
}
/* Sanity for ILT */
- if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
+ if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, QED_ILT),
RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1860,9 +1900,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+ break;
default:
DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
@@ -1976,8 +2022,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct qed_dev *cdev = p_hwfn->cdev;
- num_funcs = MAX_NUM_PFS_BB;
+ num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
@@ -1990,12 +2037,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
- if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
- num_funcs = 0;
- eng_mask = 0xaaaa;
+ if (QED_IS_BB(cdev)) {
+ if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
} else {
num_funcs = 1;
- eng_mask = 0x5554;
+ eng_mask = 0xfffe;
}
/* Get the number of the enabled functions on the engine */
@@ -2027,24 +2079,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static int
-qed_get_hw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_pci_personality personality)
+static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
u32 port_mode;
- int rc;
- /* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
- rc = qed_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
- }
-
- /* Read the port mode */
- port_mode = qed_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engines = 1;
@@ -2057,6 +2097,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
/* Default num_ports_in_engines to something */
p_hwfn->cdev->num_ports_in_engines = 1;
}
+}
+
+static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 port;
+ int i;
+
+ p_hwfn->cdev->num_ports_in_engines = 0;
+
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ if (port & 1)
+ p_hwfn->cdev->num_ports_in_engines++;
+ }
+
+ if (!p_hwfn->cdev->num_ports_in_engines) {
+ DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
+
+ /* Default num_ports_in_engine to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ qed_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ int rc;
+
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ qed_hw_info_port_num(p_hwfn, p_ptt);
qed_hw_get_nvm_info(p_hwfn, p_ptt);
@@ -2096,19 +2184,33 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
static int qed_get_dev_info(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ u16 device_id_mask;
u32 tmp;
/* Read Vendor Id / Device Id */
pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+ /* Determine type */
+ device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case QED_DEV_ID_MASK_BB:
+ cdev->type = QED_DEV_TYPE_BB;
+ break;
+ case QED_DEV_ID_MASK_AH:
+ cdev->type = QED_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+ return -EBUSY;
+ }
+
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, cdev->chip_rev);
- cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */
tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR);
@@ -2128,7 +2230,10 @@ static int qed_get_dev_info(struct qed_dev *cdev)
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
DP_INFO(cdev->hwfns,
- "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ QED_IS_BB(cdev) ? "BB" : "AH",
+ 'A' + cdev->chip_rev,
+ (int)cdev->chip_metal,
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
@@ -3363,3 +3468,8 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
memset(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
+ return QED_IS_BB(cdev) ? 2 : 1;
+}
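
The qed_dev.c changes above stop hard-coding a BB chip and instead classify the device from its PCI device ID before picking per-chip limits. A minimal, self-contained sketch of that classification follows; the mask and family ID values here are illustrative assumptions only (the real QED_DEV_ID_MASK* constants live in the driver's headers):

#include <stdio.h>
#include <stdint.h>

#define DEV_ID_MASK	0xff00	/* assumed: upper byte selects the chip family */
#define DEV_ID_MASK_BB	0x1600	/* assumed BB family pattern */
#define DEV_ID_MASK_AH	0x8000	/* assumed AH (QL41xxx-style) family pattern */

enum dev_type { DEV_TYPE_BB, DEV_TYPE_AH, DEV_TYPE_UNKNOWN };

static enum dev_type classify(uint16_t device_id)
{
	/* Mirror the switch on (device_id & mask) from qed_get_dev_info() */
	switch (device_id & DEV_ID_MASK) {
	case DEV_ID_MASK_BB:
		return DEV_TYPE_BB;
	case DEV_ID_MASK_AH:
		return DEV_TYPE_AH;
	default:
		return DEV_TYPE_UNKNOWN;	/* probe would fail here */
	}
}

int main(void)
{
	printf("0x1656 -> %d\n", classify(0x1656));	/* BB-style ID */
	printf("0x8070 -> %d\n", classify(0x8070));	/* AH-style ID */
	return 0;
}

Once the type is known, the rest of the patch simply selects the _BB or _K2 variant of each limit (VF count, L2 queues, ILT records, and so on) based on that classification.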
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb663bb..e9acdc96ba84 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
+ CORE_RAMROD_RX_QUEUE_FLUSH,
MAX_CORE_RAMROD_CMD_ID
};
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved1[5];
};
union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
-struct core_tx_bd_flags {
- u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags;
+ struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+enum dcb_dhcp_update_flag {
+ DONT_UPDATE_DCB_DHCP,
+ UPDATE_DCB,
+ UPDATE_DSCP,
+ UPDATE_DCB_DSCP,
+ MAX_DCB_DHCP_UPDATE_FLAG
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
ETH_EDPM_OUT_OF_SYNC,
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
+ ETH_ANTI_SPOOFING_ERR,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
struct regpair ll2_mac_filter_discard;
struct regpair ll2_conn_disabled_discard;
struct regpair iscsi_irregular_pkt;
- struct regpair reserved;
+ struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
+ struct regpair reserved;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x19d0000,
+ GRCBASE_TGFS = 0x19e0000,
+ GRCBASE_PTLD = 0x19f0000,
+ GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_TGFS,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
__le32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
- __le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ __le16 block_attn_offset;
__le16 reserved;
__le32 sts_val;
__le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
/* Attention register */
struct dbg_attn_reg {
struct dbg_mode_hdr mode;
- __le16 attn_idx_offset;
+ __le16 block_attn_offset;
__le32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
__le32 sts_clr_address;
__le32 mask_address;
};
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
__le16 reserved;
- struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
	struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each storm */
};
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF,
+ DBG_BUS_FILTER_TYPE_PRE,
+ DBG_BUS_FILTER_TYPE_POST,
+ DBG_BUS_FILTER_TYPE_ON,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
/* Debug bus frame modes */
enum dbg_bus_frame_modes {
DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+enum dbg_bus_input_types {
+ DBG_BUS_INPUT_TYPE_STORM,
+ DBG_BUS_INPUT_TYPE_BLOCK,
+ MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD,
+ DBG_BUS_POST_TRIGGER_DROP,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
/* Debug bus target IDs */
enum dbg_bus_targets {
/* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
/* GRC Dump data */
struct dbg_grc_data {
- __le32 param_val[40]; /* Value of each GRC parameter. Array size must
- * match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
- * set by the user (0/1). Array size must
- * match the enum dbg_grc_params.
- */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
+ __le32 param_val[48];
};
/* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
};
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
struct dbg_bus_data bus; /* Debug Bus data */
struct idle_chk_data idle_chk; /* Idle Check data */
u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
*/
u8 chip_id; /* Chip ID (from enum chip_ids) */
u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2404,7 +2502,7 @@ struct fw_info_location {
enum init_modes {
MODE_RESERVED,
- MODE_BB_B0,
+ MODE_BB,
MODE_K2,
MODE_ASIC,
MODE_RESERVED2,
@@ -2418,7 +2516,6 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_40G,
MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -2686,6 +2783,13 @@ struct iro {
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ * default value.
+ *
+ * @param p_hwfn - HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
* @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
* GRC Dump.
*
@@ -3418,7 +3522,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
@@ -3482,7 +3586,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x4cb0, 0x80, 0x0, 0x0, 0x80},
{0x6318, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3625,13 @@ static const struct iro iro_arr[47] = {
{0xd888, 0x38, 0x0, 0x0, 0x24},
{0x12c38, 0x10, 0x0, 0x0, 0x8},
{0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x30, 0x0, 0x0, 0x10},
- {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5000, 0x10, 0x0, 0x0, 0x10},
+ {0x5020, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
@@ -4595,6 +4699,12 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+enum eth_ip_type {
+ ETH_IPV4,
+ ETH_IPV6,
+ MAX_ETH_IP_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -4944,7 +5054,10 @@ struct vport_update_ramrod_data_cmn {
u8 update_mtu_flg;
__le16 mtu;
- u8 reserved[2];
+ u8 update_ctl_frame_checks_en_flg;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
struct vport_update_ramrod_mcast {
@@ -4962,6 +5075,492 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+};
+
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -6165,7 +6764,7 @@ struct ystorm_roce_conn_st_ctx {
};
struct xstorm_roce_conn_st_ctx {
- struct regpair temp[22];
+ struct regpair temp[24];
};
struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +6819,7 @@ struct roce_create_qp_req_ramrod_data {
__le16 mtu;
__le16 pd;
__le16 sq_num_pages;
- __le16 reseved2;
+ __le16 low_latency_phy_queue;
struct regpair sq_pbl_addr;
struct regpair orq_pbl_addr;
__le16 local_mac_addr[3];
@@ -6234,7 +6833,7 @@ struct roce_create_qp_req_ramrod_data {
u8 stats_counter_id;
u8 reserved3[7];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -6282,15 +6881,16 @@ struct roce_create_qp_resp_ramrod_data {
__le32 dst_gid[4];
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
- __le32 reserved2[2];
+ __le16 low_latency_phy_queue;
+ u8 reserved2[6];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +6899,7 @@ struct roce_destroy_qp_req_ramrod_data {
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8026,7 @@ struct ystorm_fcoe_conn_st_ctx {
u8 fcp_rsp_size;
__le16 mss;
struct regpair reserved;
+ __le16 min_frame_size;
u8 protection_info_flags;
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8045,6 @@ struct ystorm_fcoe_conn_st_ctx {
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
- u8 reserved3[2];
};
struct fcoe_vlan_fields {
@@ -8273,10 +8873,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
@@ -8322,10 +8922,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
@@ -8335,8 +8935,8 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
@@ -8440,7 +9040,7 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg10;
__le32 reg11;
__le32 exp_stat_sn;
- __le32 reg13;
+ __le32 ongoing_fast_rxmit_seq;
__le32 reg14;
__le32 reg15;
__le32 reg16;
@@ -8466,10 +9066,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
@@ -8490,10 +9090,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
@@ -8539,7 +9139,7 @@ struct tstorm_iscsi_conn_ag_ctx {
__le32 reg6;
__le32 reg7;
__le32 reg8;
- u8 byte2;
+ u8 cid_offload_cnt;
u8 byte3;
__le16 word0;
};
@@ -8831,11 +9431,24 @@ struct eth_stats {
u64 r511;
u64 r1023;
u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
u64 rfcs;
u64 rxcf;
u64 rxpf;
@@ -8852,14 +9465,36 @@ struct eth_stats {
u64 t511;
u64 t1023;
u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
u64 txpf;
u64 txpp;
- u64 tlpiec;
- u64 tncl;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
u64 rbyte;
u64 rxuca;
u64 rxmca;
@@ -9067,6 +9702,10 @@ struct dcb_dscp_map {
struct public_global {
u32 max_path;
u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
u32 debug_mb_offset;
u32 phymod_dbg_mb_offset;
struct couple_mode_teaming cmt;
@@ -9659,6 +10298,8 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+
u32 e_lane_cfg1;
u32 e_lane_cfg2;
u32 f_lane_cfg1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a6852695..2a50e2b7568f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands with
- * the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
- if (is_bb_a0)
- cmdq_lines = min_t(u32, cmdq_lines, 1022);
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
u16 i, pq_id, pq_group;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
bool is_vf_pq = (i >= p_params->num_pf_pqs);
struct qm_rf_pq_map tx_pq_map;
+ bool rl_valid = p_params->pq_params[i].rl_valid &&
+ (p_params->pq_params[i].vport_id <
+ MAX_QM_GLOBAL_RLS);
+
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
+
+ if (p_params->pq_params[i].rl_valid && !rl_valid)
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
/* fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- p_params->pq_params[i].rl_valid ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg,
+ QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- p_params->pq_params[i].rl_valid ?
+ rl_valid ?
p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
/* if PQ is associated with a VF, add indication
* to PQ VF mask
*/
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id /
+ QM_PF_QUEUE_GROUP_SIZE] |=
+ BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
if (p_params->pf_id < MAX_NUM_PFS_BB)
crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
- (p_params->pf_id % MAX_NUM_PFS_BB);
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+ crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
QM_WFQ_CRD_REG_SIGN_BIT);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
return 0;
}
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
{
u8 i, vport_id;
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
/* EDPM with geneve tunnel not supported in BB_B0 */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
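
Editor's note: the first hunk above drops the BB-A0-specific mask width and indexes the VF-PQ mask purely by QM_PF_QUEUE_GROUP_SIZE. Below is a minimal standalone sketch of the resulting word/bit arithmetic; the group-size value is an assumption for illustration, not taken from the patch.

#include <stdio.h>

#define QM_PF_QUEUE_GROUP_SIZE	8	/* assumed value for the example */
#define BIT(n)			(1U << (n))

int main(void)
{
	unsigned int pq_id = 21;	/* arbitrary example PQ index */
	unsigned int word = pq_id / QM_PF_QUEUE_GROUP_SIZE;
	unsigned int bit = pq_id % QM_PF_QUEUE_GROUP_SIZE;

	/* mirrors: tx_pq_vf_mask[pq_id / GROUP_SIZE] |= BIT(pq_id % GROUP_SIZE) */
	printf("pq %u -> tx_pq_vf_mask[%u] |= 0x%02x\n", pq_id, word, BIT(bit));
	return 0;
}
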
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e0d4dc..4a2e7be5bf72 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
}
/* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+ buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index df932be5a4e5..4385ccbb5efb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1470,13 +1470,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1502,10 +1509,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1539,12 +1546,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1578,23 +1588,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats)
{
+ struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1605,54 +1618,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets =
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0d3cef409c96..178650aa0c6c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -597,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
return bd_flags;
}
@@ -758,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
@@ -1591,33 +1591,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
p_tx->cur_send_frag_num++;
}
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2,
- struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- enum core_tx_dest tx_dest,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum core_roce_flavor_type type,
- dma_addr_t first_frag,
- u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
- u16 frag_idx;
+ u16 bd_data = 0, frag_idx;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
- start_bd->bd_flags.as_bitfield = bd_flags;
- start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
- CORE_TX_BD_FLAGS_START_BD_SHIFT;
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+ bd_data |= bd_flags;
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
@@ -1642,9 +1643,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bd_data.as_bitfield = 0;
(*p_bd)->bitfield1 = 0;
- (*p_bd)->bitfield0 = 0;
p_curp->bds_set[frag_idx].tx_frag = 0;
p_curp->bds_set[frag_idx].frag_len = 0;
}
@@ -2241,11 +2241,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
/* Request HW to calculate IP csum */
if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
if (skb_vlan_tag_present(skb)) {
vlan = skb_vlan_tag_get(skb);
- flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
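
Editor's note: the LL2 Tx path above collapses the old bd_flags byte and bitfield0 into a single 16-bit bd_data word built with SET_FIELD before being byte-swapped into the BD. Below is a standalone sketch of that mask/shift packing pattern; the field names, masks and shifts are illustrative stand-ins, not the real CORE_TX_BD_DATA_* HSI definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative masks/shifts only; the real values live in the FW HSI headers. */
#define EX_START_BD_MASK	0x1
#define EX_START_BD_SHIFT	0
#define EX_NBDS_MASK		0xf
#define EX_NBDS_SHIFT		1
#define EX_ROCE_FLAV_MASK	0x3
#define EX_ROCE_FLAV_SHIFT	12

#define EX_SET_FIELD(value, name, flag) \
	((value) |= (uint16_t)(((flag) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint16_t bd_data = 0;

	EX_SET_FIELD(bd_data, EX_START_BD, 1);	/* first BD of the packet */
	EX_SET_FIELD(bd_data, EX_NBDS, 3);	/* packet spans three BDs */
	EX_SET_FIELD(bd_data, EX_ROCE_FLAV, 2);	/* example flavor value */

	printf("bd_data = 0x%04x\n", bd_data);	/* -> 0x2007 */
	return 0;
}
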
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index eef30a598b40..766c6f39ea63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -238,6 +238,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+ dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) {
@@ -1653,8 +1654,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
- stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 368e88de146c..bdbfd6d4485e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -479,11 +479,10 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * \
+ qed_device_num_engines((_p_hwfn)->cdev)))
+
struct qed_mcp_info {
/* Spinlock used for protecting the access to the MFW mailbox */
spinlock_t lock;
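
Editor's note: the reworked MFW_PORT() above computes the MFW port as abs_pf_id modulo the total port count across engines rather than hard-coding the BB-only case. A tiny worked example with assumed values (two ports per engine, two engines):

#include <stdio.h>

int main(void)
{
	unsigned int ports_per_engine = 2;	/* assumed */
	unsigned int num_engines = 2;		/* assumed, e.g. a dual-engine device */
	unsigned int abs_pf_id = 5;		/* example PF */

	/* mirrors: abs_pf_id % (num_ports_in_engines * qed_device_num_engines()) */
	printf("MFW port = %u\n", abs_pf_id % (ports_per_engine * num_engines));
	return 0;
}
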
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index d27aa85da23c..80c9c0b172dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -262,12 +262,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
/* Pause free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_AH(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
/* Resume free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+ }
/* Disable drift register */
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df60cd2..6d4ac7e2ee83 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -160,13 +160,13 @@
0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
-#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
0x2aa404UL
-#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
0x2aa408UL
-#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
0x2aa40cUL
-#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
0x2aa410UL
#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
0x2aa138UL
@@ -356,6 +356,10 @@
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+ 0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+ 0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
+#define NWS_REG_DBG_SELECT \
+ 0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+ 0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+ 0x700130UL
+#define NWS_REG_DBG_FORCE_VALID \
+ 0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME \
+ 0x700138UL
+#define MS_REG_DBG_SELECT \
+ 0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+ 0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+ 0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+ 0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+ 0x6a0238UL
#define PCIE_REG_DBG_COMMON_SELECT \
0x054398UL
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
0x000b48UL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+ 4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
#define NWS_REG_NWS_CMU \
@@ -1520,4 +1550,13 @@
#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b28591c..4bef5c59627c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,13 +66,27 @@
#include "qed_roce.h"
#include "qed_ll2.h"
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe)
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+ u16 icid =
+ (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+ /* icid release in this async event can occur only if the icid
+ * was offloaded to the FW. In case it wasn't offloaded this is
+ * handled in qed_roce_sp_destroy_qp.
+ */
+ qed_roce_free_real_icid(p_hwfn, icid);
+ } else {
+ struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
- p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
- p_eqe->opcode, &p_eqe->data);
+ events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+ fw_event_code,
+ &rdma_data->async_handle);
+ }
}
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -113,6 +127,15 @@ static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
@@ -129,6 +152,15 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
+}
+
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
@@ -170,7 +202,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Queue zone lines are shared between RoCE and L2 in such a way that
* they can be used by each without obstructing the other.
*/
- p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
/* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -248,9 +281,18 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
goto free_tid_map;
}
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
@@ -273,7 +315,22 @@ free_rdma_info:
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ int wait_count = 0;
+
+ /* When destroying a RoCE QP, control is returned to the user after

+ * the synchronous part. The asynchronous part may take a little longer.
+ * We delay for a short while if an async destroy QP is still expected.
+ * Beyond the added delay we clear the bitmap anyway.
+ */
+ while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ msleep(100);
+ if (wait_count++ > 20) {
+ DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+ break;
+ }
+ }
kfree(p_rdma_info->cid_map.bitmap);
kfree(p_rdma_info->tid_map.bitmap);
@@ -724,6 +781,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
u32 addr;
p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -1080,6 +1145,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,6 +1212,13 @@ err:
return rc;
}
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
@@ -1147,7 +1227,8 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1310,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
memset(&qm_params, 0, sizeof(qm_params));
qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+ &qm_params);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1338,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
- rc, physical_queue0);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "rc = %d regular physical queue = 0x%x\n", rc,
+ regular_latency_queue);
if (rc)
goto err;
qp->resp_offloaded = true;
+ qp->cq_prod = 0;
+
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn, qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1280,7 +1371,8 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1443,19 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
- p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
- qp->sq_cq_id);
+ p_ramrod->cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
memset(&qm_params, 0, sizeof(qm_params));
qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+ &qm_params);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1474,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
goto err;
qp->req_offloaded = true;
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn,
+ qp->icid + 1 -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1577,7 +1677,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
- u32 *num_invalidated_mw)
+ u32 *num_invalidated_mw,
+ u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1689,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
- if (!qp->resp_offloaded)
+ *num_invalidated_mw = 0;
+ *cq_prod = qp->cq_prod;
+
+ if (!qp->resp_offloaded) {
+ /* If a responder was never offloaded, we need to free the cids
+ * allocated in create_qp, as a FW async event will never arrive
+ */
+ u32 cid;
+
+ cid = qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
return 0;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1739,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+ *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+ qp->cq_prod = *cq_prod;
/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1944,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
out_params->draining = false;
- if (rq_err_state)
+ if (rq_err_state || sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
- else if (sq_err_state)
- qp->cur_state = QED_ROCE_QP_STATE_SQE;
else if (sq_draining)
out_params->draining = true;
out_params->state = qp->cur_state;
@@ -1849,10 +1964,9 @@ err_resp:
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
- u32 start_cid;
+ u32 cq_prod;
int rc;
/* Destroys the specified QP */
@@ -1866,7 +1980,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
+ &num_invalidated_mw,
+ &cq_prod);
if (rc)
return rc;
@@ -1881,21 +1996,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
-
- spin_lock_bh(&p_rdma_info->lock);
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_rdma_info->proto);
-
- /* Release responder's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid - start_cid);
-
- /* Release requester's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid + 1 - start_cid);
-
- spin_unlock_bh(&p_rdma_info->lock);
}
return 0;
@@ -2110,12 +2210,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
/* Any state -> RESET */
+ u32 cq_prod;
+
+ /* Send destroy responder ramrod */
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+ qp,
+ &num_invalidated_mw,
+ &cq_prod);
- rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
if (rc)
return rc;
+ qp->cq_prod = cq_prod;
+
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
@@ -2454,6 +2561,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
return rc;
}
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 start_cid, cid, xcid;
+
+ /* An even icid belongs to a responder while an odd icid belongs to a
+ * requester. The 'cid' received as an input can be either. We calculate
+ * the "partner" icid and call it xcid. Only if both are free can the
+ * "cid" map be cleared.
+ */
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+ cid = icid - start_cid;
+ xcid = cid ^ 1;
+
+ spin_lock_bh(&p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+ if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+ }
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
return QED_LEADING_HWFN(cdev);
@@ -2773,7 +2905,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
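
Editor's note: a minimal sketch of the even/odd icid pairing that qed_roce_free_real_icid() above relies on; XOR-ing the relative cid with 1 yields its responder/requester partner. Only the arithmetic is shown, the bitmap bookkeeping is omitted.

#include <stdio.h>

int main(void)
{
	unsigned int cid;

	for (cid = 0; cid < 4; cid++)
		printf("cid %u (%s) pairs with xcid %u\n",
		       cid, (cid & 1) ? "requester" : "responder", cid ^ 1);
	return 0;
}
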
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2ab7fa..3ccc08a7c995 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -82,6 +82,7 @@ struct qed_rdma_info {
struct qed_bmap qp_map;
struct qed_bmap srq_map;
struct qed_bmap cid_map;
+ struct qed_bmap real_cid_map;
struct qed_bmap dpi_map;
struct qed_bmap toggle_bits;
struct qed_rdma_events events;
@@ -92,6 +93,7 @@ struct qed_rdma_info {
u32 num_qps;
u32 num_mrs;
u16 queue_zone_base;
+ u16 max_queue_zones;
enum protocol_type proto;
};
@@ -153,6 +155,7 @@ struct qed_rdma_qp {
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
+ u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@@ -163,8 +166,8 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
@@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u16 src_mac_addr_lo, bool b_last_packet);
#else
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ union rdma_eqe_data *rdma_data) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a9f0cf..54fbe3789cf3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -296,9 +296,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
case PROTOCOLID_ROCE:
- qed_async_roce_event(p_hwfn, p_eqe);
+ qed_roce_async_event(p_hwfn, p_eqe->opcode,
+ &p_eqe->data.rdma_data);
return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -306,14 +309,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
- if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
- u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
- qed_ooo_release_connection_isles(p_hwfn,
- p_hwfn->p_ooo_info,
- cid);
- return 0;
- }
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 253c2bbe1e4e..16f503c9b0af 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -557,14 +557,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine VFs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on the next device,
+ * so offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is reached only in the
+ * latter case to differentiate between the two.
*/
- cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (QED_PATH_ID(p_hwfn))
- cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"First VF in hwfn 0x%08x\n",
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f2aaef2cfb86..e73a4a5165ee 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -50,7 +50,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 20
+#define QEDE_ENGINEERING_VERSION 21
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -58,7 +58,7 @@
#define DRV_MODULE_SYM qede
-struct qede_stats {
+struct qede_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -90,11 +90,6 @@ struct qede_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -111,17 +106,39 @@ struct qede_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
- u64 tx_pause_frames;
- u64 tx_pfc_frames;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
- u64 brb_truncates;
- u64 brb_discards;
- u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+ struct qede_stats_common common;
+
+ union {
+ struct qede_stats_bb bb;
+ struct qede_stats_ah ah;
+ };
};
struct qede_vlan {
@@ -158,6 +175,10 @@ struct qede_dev {
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
u8 req_num_tx;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 897953133245..4dcfe9614731 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -75,16 +75,33 @@ static const struct {
QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
- {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+ (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+ {QEDE_STAT_OFFSET(stat_name, type, base), \
+ QEDE_STAT_STRING(stat_name), \
+ attr}
+#define QEDE_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+ BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_bb, \
+ offsetof(struct qede_stats, bb), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_ah, \
+ offsetof(struct qede_stats, ah), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
- bool pf_only;
+ unsigned long attr;
+#define QEDE_STAT_PF_ONLY 0
+#define QEDE_STAT_BB_ONLY 1
+#define QEDE_STAT_AH_ONLY 2
} qede_stats_arr[] = {
QEDE_STAT(rx_ucast_bytes),
QEDE_STAT(rx_mcast_bytes),
@@ -106,22 +123,23 @@ static const struct {
QEDE_PF_STAT(rx_256_to_511_byte_packets),
QEDE_PF_STAT(rx_512_to_1023_byte_packets),
QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
- QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
QEDE_PF_STAT(tx_256_to_511_byte_packets),
QEDE_PF_STAT(tx_512_to_1023_byte_packets),
QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
-
+ QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
QEDE_PF_STAT(rx_mac_crtl_frames),
QEDE_PF_STAT(tx_mac_ctrl_frames),
QEDE_PF_STAT(rx_pause_frames),
@@ -136,8 +154,8 @@ static const struct {
QEDE_PF_STAT(rx_jabbers),
QEDE_PF_STAT(rx_undersize_packets),
QEDE_PF_STAT(rx_fragments),
- QEDE_PF_STAT(tx_lpi_entry_count),
- QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_BB_STAT(tx_lpi_entry_count),
+ QEDE_PF_BB_STAT(tx_total_collisions),
QEDE_PF_STAT(brb_truncates),
QEDE_PF_STAT(brb_discards),
QEDE_STAT(no_buff_discards),
@@ -155,6 +173,12 @@ static const struct {
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+ test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+ test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+ test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
enum {
QEDE_PRI_FLAG_CMT,
@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
}
}
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+ return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+ (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+ (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
struct qede_fastpath *fp;
@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
/* Account for non-queue statistics */
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
strcpy(buf, qede_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
*buf = *((u64 *)(((void *)&edev->stats) +
qede_stats_arr[i].offset));
@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
struct qede_dev *edev = netdev_priv(dev);
- int num_stats = QEDE_NUM_STATS;
+ int num_stats = QEDE_NUM_STATS, i;
switch (stringset) {
case ETH_SS_STATS:
- if (IS_VF(edev)) {
- int i;
-
- for (i = 0; i < QEDE_NUM_STATS; i++)
- if (qede_stats_arr[i].pf_only)
- num_stats--;
- }
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_is_irrelevant_stat(edev, i))
+ num_stats--;
/* Account for the Regular Tx statistics */
num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
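
Editor's note: the reworked ethtool stat table above stores, per entry, the field's offset inside its sub-struct plus that sub-struct's offset inside struct qede_stats, and qede_get_ethtool_stats() then reads a u64 at that byte offset. A trimmed-down sketch of the scheme; the structs below are stand-ins, not the real layouts.

#include <stddef.h>
#include <stdio.h>

struct stats_common { unsigned long long rx_ucast_pkts; };
struct stats_bb { unsigned long long tx_total_collisions; };
struct stats_ah { unsigned long long rx_1519_to_max_byte_packets; };

struct stats {
	struct stats_common common;
	union {
		struct stats_bb bb;
		struct stats_ah ah;
	};
};

int main(void)
{
	struct stats s = { .bb = { .tx_total_collisions = 7 } };
	size_t off = offsetof(struct stats, bb) +
		     offsetof(struct stats_bb, tx_total_collisions);

	/* mirrors: *(u64 *)((void *)&edev->stats + entry->offset) */
	printf("%llu\n", *(unsigned long long *)((char *)&s + off));
	return 0;
}
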
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 3a78c3f25157..abd99109e532 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_AH 0x8070
+#define CHIP_NUM_AH_IOV 0x8090
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
+
#endif
enum qede_pci_private {
@@ -110,6 +115,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -314,122 +323,135 @@ static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
+ struct qede_stats_common *p_common = &edev->stats.common;
struct qed_eth_stats stats;
edev->ops->get_vport_stats(edev->cdev, &stats);
- edev->stats.no_buff_discards = stats.no_buff_discards;
- edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
- edev->stats.ttl0_discard = stats.ttl0_discard;
- edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
- edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
- edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
- edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
- edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
- edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
- edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
- edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
- edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
- edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
- edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
- edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
- edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
- edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
- edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
- edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
- edev->stats.coalesced_events = stats.tpa_coalesced_events;
- edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
- edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
- edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
- edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
- edev->stats.rx_128_to_255_byte_packets =
- stats.rx_128_to_255_byte_packets;
- edev->stats.rx_256_to_511_byte_packets =
- stats.rx_256_to_511_byte_packets;
- edev->stats.rx_512_to_1023_byte_packets =
- stats.rx_512_to_1023_byte_packets;
- edev->stats.rx_1024_to_1518_byte_packets =
- stats.rx_1024_to_1518_byte_packets;
- edev->stats.rx_1519_to_1522_byte_packets =
- stats.rx_1519_to_1522_byte_packets;
- edev->stats.rx_1519_to_2047_byte_packets =
- stats.rx_1519_to_2047_byte_packets;
- edev->stats.rx_2048_to_4095_byte_packets =
- stats.rx_2048_to_4095_byte_packets;
- edev->stats.rx_4096_to_9216_byte_packets =
- stats.rx_4096_to_9216_byte_packets;
- edev->stats.rx_9217_to_16383_byte_packets =
- stats.rx_9217_to_16383_byte_packets;
- edev->stats.rx_crc_errors = stats.rx_crc_errors;
- edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
- edev->stats.rx_pause_frames = stats.rx_pause_frames;
- edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
- edev->stats.rx_align_errors = stats.rx_align_errors;
- edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
- edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
- edev->stats.rx_jabbers = stats.rx_jabbers;
- edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
- edev->stats.rx_fragments = stats.rx_fragments;
- edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
- edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
- edev->stats.tx_128_to_255_byte_packets =
- stats.tx_128_to_255_byte_packets;
- edev->stats.tx_256_to_511_byte_packets =
- stats.tx_256_to_511_byte_packets;
- edev->stats.tx_512_to_1023_byte_packets =
- stats.tx_512_to_1023_byte_packets;
- edev->stats.tx_1024_to_1518_byte_packets =
- stats.tx_1024_to_1518_byte_packets;
- edev->stats.tx_1519_to_2047_byte_packets =
- stats.tx_1519_to_2047_byte_packets;
- edev->stats.tx_2048_to_4095_byte_packets =
- stats.tx_2048_to_4095_byte_packets;
- edev->stats.tx_4096_to_9216_byte_packets =
- stats.tx_4096_to_9216_byte_packets;
- edev->stats.tx_9217_to_16383_byte_packets =
- stats.tx_9217_to_16383_byte_packets;
- edev->stats.tx_pause_frames = stats.tx_pause_frames;
- edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
- edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
- edev->stats.tx_total_collisions = stats.tx_total_collisions;
- edev->stats.brb_truncates = stats.brb_truncates;
- edev->stats.brb_discards = stats.brb_discards;
- edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+
+ p_common->no_buff_discards = stats.common.no_buff_discards;
+ p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ p_common->ttl0_discard = stats.common.ttl0_discard;
+ p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+ p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+ p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+ p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+ p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+ p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+ p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+ p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+ p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+ p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+ p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+ p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+ p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+ p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+ p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+ p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+ p_common->coalesced_events = stats.common.tpa_coalesced_events;
+ p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+ p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+ p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+ p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+ p_common->rx_65_to_127_byte_packets =
+ stats.common.rx_65_to_127_byte_packets;
+ p_common->rx_128_to_255_byte_packets =
+ stats.common.rx_128_to_255_byte_packets;
+ p_common->rx_256_to_511_byte_packets =
+ stats.common.rx_256_to_511_byte_packets;
+ p_common->rx_512_to_1023_byte_packets =
+ stats.common.rx_512_to_1023_byte_packets;
+ p_common->rx_1024_to_1518_byte_packets =
+ stats.common.rx_1024_to_1518_byte_packets;
+ p_common->rx_crc_errors = stats.common.rx_crc_errors;
+ p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+ p_common->rx_pause_frames = stats.common.rx_pause_frames;
+ p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+ p_common->rx_align_errors = stats.common.rx_align_errors;
+ p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+ p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+ p_common->rx_jabbers = stats.common.rx_jabbers;
+ p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+ p_common->rx_fragments = stats.common.rx_fragments;
+ p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+ p_common->tx_65_to_127_byte_packets =
+ stats.common.tx_65_to_127_byte_packets;
+ p_common->tx_128_to_255_byte_packets =
+ stats.common.tx_128_to_255_byte_packets;
+ p_common->tx_256_to_511_byte_packets =
+ stats.common.tx_256_to_511_byte_packets;
+ p_common->tx_512_to_1023_byte_packets =
+ stats.common.tx_512_to_1023_byte_packets;
+ p_common->tx_1024_to_1518_byte_packets =
+ stats.common.tx_1024_to_1518_byte_packets;
+ p_common->tx_pause_frames = stats.common.tx_pause_frames;
+ p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+ p_common->brb_truncates = stats.common.brb_truncates;
+ p_common->brb_discards = stats.common.brb_discards;
+ p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+ if (QEDE_IS_BB(edev)) {
+ struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+ p_bb->rx_1519_to_1522_byte_packets =
+ stats.bb.rx_1519_to_1522_byte_packets;
+ p_bb->rx_1519_to_2047_byte_packets =
+ stats.bb.rx_1519_to_2047_byte_packets;
+ p_bb->rx_2048_to_4095_byte_packets =
+ stats.bb.rx_2048_to_4095_byte_packets;
+ p_bb->rx_4096_to_9216_byte_packets =
+ stats.bb.rx_4096_to_9216_byte_packets;
+ p_bb->rx_9217_to_16383_byte_packets =
+ stats.bb.rx_9217_to_16383_byte_packets;
+ p_bb->tx_1519_to_2047_byte_packets =
+ stats.bb.tx_1519_to_2047_byte_packets;
+ p_bb->tx_2048_to_4095_byte_packets =
+ stats.bb.tx_2048_to_4095_byte_packets;
+ p_bb->tx_4096_to_9216_byte_packets =
+ stats.bb.tx_4096_to_9216_byte_packets;
+ p_bb->tx_9217_to_16383_byte_packets =
+ stats.bb.tx_9217_to_16383_byte_packets;
+ p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+ p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+ } else {
+ struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+ p_ah->rx_1519_to_max_byte_packets =
+ stats.ah.rx_1519_to_max_byte_packets;
+ p_ah->tx_1519_to_max_byte_packets =
+ stats.ah.tx_1519_to_max_byte_packets;
+ }
}
static void qede_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qede_stats_common *p_common;
qede_fill_by_demand_stats(edev);
+ p_common = &edev->stats.common;
- stats->rx_packets = edev->stats.rx_ucast_pkts +
- edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
- stats->tx_packets = edev->stats.tx_ucast_pkts +
- edev->stats.tx_mcast_pkts +
- edev->stats.tx_bcast_pkts;
-
- stats->rx_bytes = edev->stats.rx_ucast_bytes +
- edev->stats.rx_mcast_bytes +
- edev->stats.rx_bcast_bytes;
+ stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
- stats->tx_bytes = edev->stats.tx_ucast_bytes +
- edev->stats.tx_mcast_bytes +
- edev->stats.tx_bcast_bytes;
+ stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
- stats->tx_errors = edev->stats.tx_err_drop_pkts;
- stats->multicast = edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
+ stats->tx_errors = p_common->tx_err_drop_pkts;
+ stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
- stats->rx_fifo_errors = edev->stats.no_buff_discards;
+ stats->rx_fifo_errors = p_common->no_buff_discards;
- stats->collisions = edev->stats.tx_total_collisions;
- stats->rx_crc_errors = edev->stats.rx_crc_errors;
- stats->rx_frame_errors = edev->stats.rx_align_errors;
+ if (QEDE_IS_BB(edev))
+ stats->collisions = edev->stats.bb.tx_total_collisions;
+ stats->rx_crc_errors = p_common->rx_crc_errors;
+ stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
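The qede hunk above splits the driver statistics into a common block shared by both ASIC families plus bb/ah specific blocks, with QEDE_IS_BB() deciding which chip-specific part is valid when filling rtnl_link_stats64. The sketch below is a minimal illustration of that layout idea and of aggregating from the common block only; the struct and function names are invented for illustration and are not the qede definitions, and whether the chip-specific parts share storage is not shown in the hunk.

    #include <linux/types.h>

    /* Hedged sketch: stats split into a common part plus chip-specific
     * parts, mirroring the qede common/bb/ah split (names illustrative).
     */
    struct stats_common {
        u64 rx_ucast_pkts, rx_mcast_pkts, rx_bcast_pkts;
        u64 tx_ucast_pkts, tx_mcast_pkts, tx_bcast_pkts;
    };

    struct stats_bb {
        u64 tx_total_collisions;        /* only meaningful on BB parts */
    };

    struct stats_ah {
        u64 rx_1519_to_max_byte_packets; /* only meaningful on AH parts */
    };

    struct dev_stats {
        struct stats_common common;
        struct stats_bb bb;
        struct stats_ah ah;
    };

    /* Aggregate packet counters from the common block, as the new
     * qede_get_stats64() does; chip-specific fields would be consulted
     * behind a family check.
     */
    static u64 total_rx_packets(const struct dev_stats *s)
    {
        return s->common.rx_ucast_pkts + s->common.rx_mcast_pkts +
               s->common.rx_bcast_pkts;
    }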
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
index f62c215be779..7116be485e61 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -26,6 +26,7 @@
/* SGMII digital lane registers */
#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C
+#define EMAC_SGMII_LN_DRVR_CTRL1 0x0010
#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018
#define EMAC_SGMII_LN_TX_MARGINING 0x001C
#define EMAC_SGMII_LN_TX_PRE 0x0020
@@ -48,6 +49,7 @@
#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC
#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8
#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8
+#define EMAC_SGMII_LN_RX_RESECODE_OFFSET 0x02CC
/* SGMII digital lane register values */
#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
@@ -73,6 +75,8 @@
#define CML_GEAR_MODE(x) (((x) & 7) << 3)
#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+#define RESCODE_OFFSET(x) ((x) & 0x1f)
+
#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
#define MIXER_DATARATE_MODE(x) ((x) & 3)
@@ -159,6 +163,8 @@ static const struct emac_reg_write sgmii_laned[] = {
{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)},
+ {EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)},
{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
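The QDF2400 SGMII change adds two lane registers and a RESCODE_OFFSET() encoder that keeps only the 5-bit field ((x) & 0x1f), then seeds them through the existing sgmii_laned init table of register/value pairs. The sketch below shows the table-apply pattern in generic form; the struct and helper names here are illustrative stand-ins, not the emac driver's own emac_reg_write machinery.

    #include <linux/io.h>
    #include <linux/types.h>

    /* Illustrative only: apply a table of register writes to a lane base. */
    struct reg_write {
        unsigned int offset;
        u32 val;
    };

    static void apply_reg_writes(void __iomem *base,
                                 const struct reg_write *tbl, size_t n)
    {
        size_t i;

        for (i = 0; i < n; i++)
            writel_relaxed(tbl[i].val, base + tbl[i].offset);
    }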
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 672f6b696069..72233ab9474b 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1406,27 +1406,29 @@ static int cp_get_sset_count (struct net_device *dev, int sset)
}
}
-static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cp_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_gset(&cp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
-static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cp_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_sset(&cp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
@@ -1578,8 +1580,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_drvinfo = cp_get_drvinfo,
.get_regs_len = cp_get_regs_len,
.get_sset_count = cp_get_sset_count,
- .get_settings = cp_get_settings,
- .set_settings = cp_set_settings,
.nway_reset = cp_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = cp_get_msglevel,
@@ -1593,6 +1593,8 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_eeprom = cp_get_eeprom,
.set_eeprom = cp_set_eeprom,
.get_ringparam = cp_get_ringparam,
+ .get_link_ksettings = cp_get_link_ksettings,
+ .set_link_ksettings = cp_set_link_ksettings,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
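The 8139cp conversion is the canonical shape of the legacy get_settings/set_settings to link_ksettings migration for MII-managed NICs: take the driver lock, delegate to the mii helpers, and register the new handlers in ethtool_ops. A condensed sketch of that shape, with a placeholder private struct standing in for the driver's own:

    #include <linux/ethtool.h>
    #include <linux/mii.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct foo_priv {                       /* placeholder private struct */
        struct mii_if_info mii_if;
        spinlock_t lock;
    };

    static int foo_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
    {
        struct foo_priv *p = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&p->lock, flags);
        rc = mii_ethtool_get_link_ksettings(&p->mii_if, cmd);
        spin_unlock_irqrestore(&p->lock, flags);
        return rc;
    }

    static int foo_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
    {
        struct foo_priv *p = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&p->lock, flags);
        rc = mii_ethtool_set_link_ksettings(&p->mii_if, cmd);
        spin_unlock_irqrestore(&p->lock, flags);
        return rc;
    }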
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 89631753e799..ca22f2898664 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2384,21 +2384,23 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
-static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8139_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8139_private *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock);
- mii_ethtool_gset(&tp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii, cmd);
spin_unlock_irq(&tp->lock);
return 0;
}
-static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8139_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct rtl8139_private *tp = netdev_priv(dev);
int rc;
spin_lock_irq(&tp->lock);
- rc = mii_ethtool_sset(&tp->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd);
spin_unlock_irq(&tp->lock);
return rc;
}
@@ -2480,8 +2482,6 @@ static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static const struct ethtool_ops rtl8139_ethtool_ops = {
.get_drvinfo = rtl8139_get_drvinfo,
- .get_settings = rtl8139_get_settings,
- .set_settings = rtl8139_set_settings,
.get_regs_len = rtl8139_get_regs_len,
.get_regs = rtl8139_get_regs,
.nway_reset = rtl8139_nway_reset,
@@ -2493,6 +2493,8 @@ static const struct ethtool_ops rtl8139_ethtool_ops = {
.get_strings = rtl8139_get_strings,
.get_sset_count = rtl8139_get_sset_count,
.get_ethtool_stats = rtl8139_get_ethtool_stats,
+ .get_link_ksettings = rtl8139_get_link_ksettings,
+ .set_link_ksettings = rtl8139_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
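On the ops-table side, each of these conversions drops the .get_settings/.set_settings entries and adds .get_link_ksettings/.set_link_ksettings, as the 8139too hunk above does. A self-contained sketch of that wiring (handler names invented, bodies stubbed):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int example_get_link_ksettings(struct net_device *dev,
                                          struct ethtool_link_ksettings *cmd)
    {
        /* fill cmd->base and cmd->link_modes here */
        return 0;
    }

    static int example_set_link_ksettings(struct net_device *dev,
                                          const struct ethtool_link_ksettings *cmd)
    {
        /* apply the requested settings here */
        return 0;
    }

    static const struct ethtool_ops example_ethtool_ops = {
        .get_link           = ethtool_op_get_link,
        .get_link_ksettings = example_get_link_ksettings,
        .set_link_ksettings = example_set_link_ksettings,
    };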
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 81f18a833527..0a8f2817ea60 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -817,7 +817,8 @@ struct rtl8169_private {
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*get_link_ksettings)(struct net_device *,
+ struct ethtool_link_ksettings *);
void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct net_device *);
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
@@ -2115,41 +2116,49 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 status;
+ u32 supported, advertising;
- cmd->supported =
+ supported =
SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_FIBRE;
status = RTL_R32(TBICSR);
- cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->autoneg = !!(status & TBINwEnable);
+ advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->base.autoneg = !!(status & TBINwEnable);
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL; /* Always set */
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL; /* Always set */
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- return mii_ethtool_gset(&tp->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&tp->mii, cmd);
}
-static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
int rc;
rtl_lock_work(tp);
- rc = tp->get_settings(dev, cmd);
+ rc = tp->get_link_ksettings(dev, cmd);
rtl_unlock_work(tp);
return rc;
@@ -2356,7 +2365,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
- .get_settings = rtl8169_get_settings,
.set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
@@ -2368,6 +2376,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
+ .get_link_ksettings = rtl8169_get_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -8351,14 +8360,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_settings = rtl8169_gset_tbi;
+ tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
tp->phy_reset_enable = rtl8169_tbi_reset_enable;
tp->phy_reset_pending = rtl8169_tbi_reset_pending;
tp->link_ok = rtl8169_tbi_link_ok;
tp->do_ioctl = rtl_tbi_ioctl;
} else {
tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_settings = rtl8169_gset_xmii;
+ tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
tp->phy_reset_enable = rtl8169_xmii_reset_enable;
tp->phy_reset_pending = rtl8169_xmii_reset_pending;
tp->link_ok = rtl8169_xmii_link_ok;
@@ -8444,9 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
~(RxBOVF | RxFOVF) : ~0;
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = rtl8169_phy_timer;
+ setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
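The r8169 TBI path above still computes legacy SUPPORTED_*/ADVERTISED_* masks and then converts them into the ksettings link-mode bitmaps with ethtool_convert_legacy_u32_to_link_mode(); the same hunk also replaces the open-coded init_timer() sequence with setup_timer(). A minimal sketch of the conversion step, filling only the fields the TBI code touches (function name invented):

    #include <linux/ethtool.h>

    /* Sketch: build legacy u32 masks, then convert them into the
     * link_modes bitmaps of ethtool_link_ksettings.
     */
    static void fill_fixed_1g_fibre(struct ethtool_link_ksettings *cmd,
                                    bool autoneg)
    {
        u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                        SUPPORTED_FIBRE;
        u32 advertising = autoneg ? ADVERTISED_Autoneg : 0;

        cmd->base.port = PORT_FIBRE;
        cmd->base.speed = SPEED_1000;
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);
    }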
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 0f63a44a955d..b712ec23075b 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1115,7 +1115,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
const struct rocker_desc_info *desc_info,
void *priv)
{
- struct ethtool_cmd *ecmd = priv;
+ struct ethtool_link_ksettings *ecmd = priv;
const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
@@ -1137,13 +1137,14 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_TP;
- ecmd->phy_address = 0xff;
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
- ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+
+ ecmd->base.phy_address = 0xff;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.speed = speed;
+ ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@@ -1250,7 +1251,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct ethtool_cmd *ecmd = priv;
+ struct ethtool_link_ksettings *ecmd = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1263,13 +1264,13 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
rocker_port->pport))
return -EMSGSIZE;
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
- ethtool_cmd_speed(ecmd)))
+ ecmd->base.speed))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
- ecmd->duplex))
+ ecmd->base.duplex))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
- ecmd->autoneg))
+ ecmd->base.autoneg))
return -EMSGSIZE;
rocker_tlv_nest_end(desc_info, cmd_info);
return 0;
@@ -1347,8 +1348,9 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
return 0;
}
-static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
+static int
+rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_link_ksettings *ecmd)
{
return rocker_cmd_exec(rocker_port, false,
rocker_cmd_get_port_settings_prep, NULL,
@@ -1373,12 +1375,17 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
rocker_cmd_get_port_settings_mode_proc, p_mode);
}
-static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
+static int
+rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+ const struct ethtool_link_ksettings *ecmd)
{
+ struct ethtool_link_ksettings copy_ecmd;
+
+ memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));
+
return rocker_cmd_exec(rocker_port, false,
rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL);
+ &copy_ecmd, NULL, NULL);
}
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
@@ -2237,16 +2244,18 @@ static int rocker_router_fib_event(struct notifier_block *nb,
* ethtool interface
********************/
-static int rocker_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int
+rocker_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct rocker_port *rocker_port = netdev_priv(dev);
return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}
-static int rocker_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int
+rocker_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct rocker_port *rocker_port = netdev_priv(dev);
@@ -2388,13 +2397,13 @@ static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
- .get_settings = rocker_port_get_settings,
- .set_settings = rocker_port_set_settings,
.get_drvinfo = rocker_port_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = rocker_port_get_strings,
.get_ethtool_stats = rocker_port_get_stats,
.get_sset_count = rocker_port_get_sset_count,
+ .get_link_ksettings = rocker_port_get_link_ksettings,
+ .set_link_ksettings = rocker_port_set_link_ksettings,
};
/*****************
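Rocker's conversion shows two details worth noting: link modes are now set with the ethtool_link_ksettings_zero_link_mode()/ethtool_link_ksettings_add_link_mode() helpers instead of a SUPPORTED_* mask, and because the command-exec path takes a non-const private pointer, the const ksettings argument is first copied into a local. A hedged sketch of both (helper names invented, the exec call elided):

    #include <linux/ethtool.h>
    #include <linux/string.h>

    /* Sketch: mark only twisted-pair as supported via the bitmap helpers. */
    static void mark_tp_only(struct ethtool_link_ksettings *cmd)
    {
        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
    }

    /* Sketch: pass a const request into an API that wants a mutable
     * pointer by working on a stack copy, as the rocker change does.
     */
    static int send_settings(const struct ethtool_link_ksettings *cmd)
    {
        struct ethtool_link_ksettings copy;

        memcpy(&copy, cmd, sizeof(copy));
        /* ... hand &copy to the command-exec path here ... */
        return 0;
    }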
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 57e6cef81ebe..52ead5524de7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1558,25 +1558,27 @@ static void ioc3_get_drvinfo (struct net_device *dev,
strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}
-static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ioc3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_gset(&ip->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
-static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ioc3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_sset(&ip->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
@@ -1608,10 +1610,10 @@ static u32 ioc3_get_link(struct net_device *dev)
static const struct ethtool_ops ioc3_ethtool_ops = {
.get_drvinfo = ioc3_get_drvinfo,
- .get_settings = ioc3_get_settings,
- .set_settings = ioc3_set_settings,
.nway_reset = ioc3_nway_reset,
.get_link = ioc3_get_link,
+ .get_link_ksettings = ioc3_get_link_ksettings,
+ .set_link_ksettings = ioc3_set_link_ksettings,
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 6c2e2b311c16..751c81848f35 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1122,14 +1122,16 @@ static void sc92031_poll_controller(struct net_device *dev)
}
#endif
-static int sc92031_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+sc92031_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
u8 phy_address;
u32 phy_ctrl;
u16 output_status;
+ u32 supported, advertising;
spin_lock_bh(&priv->lock);
@@ -1142,68 +1144,77 @@ static int sc92031_ethtool_get_settings(struct net_device *dev,
spin_unlock_bh(&priv->lock);
- cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+ supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advertising = ADVERTISED_TP | ADVERTISED_MII;
if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
== (PhyCtrlSpd10 | PhyCtrlDux))
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
== (PhyCtrlSpd100 | PhyCtrlDux))
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (phy_ctrl & PhyCtrlAne)
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
- ethtool_cmd_speed_set(cmd,
- (output_status & 0x2) ? SPEED_100 : SPEED_10);
- cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = PORT_MII;
- cmd->phy_address = phy_address;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
+ cmd->base.duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = phy_address;
+ cmd->base.autoneg = (phy_ctrl & PhyCtrlAne) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int sc92031_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+sc92031_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
u32 phy_ctrl;
u32 old_phy_ctrl;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(speed == SPEED_10 || speed == SPEED_100))
return -EINVAL;
- if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
- return -EINVAL;
- if (!(cmd->port == PORT_MII))
+ if (!(cmd->base.duplex == DUPLEX_HALF ||
+ cmd->base.duplex == DUPLEX_FULL))
return -EINVAL;
- if (!(cmd->phy_address == 0x1f))
+ if (!(cmd->base.port == PORT_MII))
return -EINVAL;
- if (!(cmd->transceiver == XCVR_INTERNAL))
+ if (!(cmd->base.phy_address == 0x1f))
return -EINVAL;
- if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
+ if (!(cmd->base.autoneg == AUTONEG_DISABLE ||
+ cmd->base.autoneg == AUTONEG_ENABLE))
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (!(cmd->advertising & (ADVERTISED_Autoneg
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(advertising & (ADVERTISED_Autoneg
| ADVERTISED_100baseT_Full
| ADVERTISED_100baseT_Half
| ADVERTISED_10baseT_Full
@@ -1213,15 +1224,15 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
phy_ctrl = PhyCtrlAne;
// FIXME: I'm not sure what the original code was trying to do
- if (cmd->advertising & ADVERTISED_Autoneg)
+ if (advertising & ADVERTISED_Autoneg)
phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
phy_ctrl |= PhyCtrlSpd100;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
phy_ctrl |= PhyCtrlSpd10;
} else {
// FIXME: Whole branch guessed
@@ -1232,7 +1243,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
else /* cmd->speed == SPEED_100 */
phy_ctrl |= PhyCtrlSpd100;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
phy_ctrl |= PhyCtrlDux;
}
@@ -1368,8 +1379,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops sc92031_ethtool_ops = {
- .get_settings = sc92031_ethtool_get_settings,
- .set_settings = sc92031_ethtool_set_settings,
.get_wol = sc92031_ethtool_get_wol,
.set_wol = sc92031_ethtool_set_wol,
.nway_reset = sc92031_ethtool_nway_reset,
@@ -1377,6 +1386,8 @@ static const struct ethtool_ops sc92031_ethtool_ops = {
.get_strings = sc92031_ethtool_get_strings,
.get_sset_count = sc92031_ethtool_get_sset_count,
.get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
+ .get_link_ksettings = sc92031_ethtool_get_link_ksettings,
+ .set_link_ksettings = sc92031_ethtool_set_link_ksettings,
};
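The sc92031 set path goes the other way: the requested link-mode bitmap is converted back to a legacy ADVERTISED_* u32 with ethtool_convert_link_mode_to_legacy_u32() so the existing bit tests keep working. A sketch of that validation step, mirroring the checks above (function name invented):

    #include <linux/errno.h>
    #include <linux/ethtool.h>

    static int check_advertising(const struct ethtool_link_ksettings *cmd)
    {
        u32 advertising;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        if (cmd->base.autoneg == AUTONEG_ENABLE &&
            !(advertising & (ADVERTISED_Autoneg |
                             ADVERTISED_100baseT_Full |
                             ADVERTISED_100baseT_Half |
                             ADVERTISED_10baseT_Full |
                             ADVERTISED_10baseT_Half)))
            return -EINVAL;

        return 0;
    }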
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 210e35d079dd..02da106c6e04 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1734,18 +1734,20 @@ static void sis190_set_speed_auto(struct net_device *dev)
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
-static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int sis190_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_gset(&tp->mii_if, cmd);
+ return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
}
-static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int sis190_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_sset(&tp->mii_if, cmd);
+ return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
}
static void sis190_get_drvinfo(struct net_device *dev,
@@ -1797,8 +1799,6 @@ static void sis190_set_msglevel(struct net_device *dev, u32 value)
}
static const struct ethtool_ops sis190_ethtool_ops = {
- .get_settings = sis190_get_settings,
- .set_settings = sis190_set_settings,
.get_drvinfo = sis190_get_drvinfo,
.get_regs_len = sis190_get_regs_len,
.get_regs = sis190_get_regs,
@@ -1806,6 +1806,8 @@ static const struct ethtool_ops sis190_ethtool_ops = {
.get_msglevel = sis190_get_msglevel,
.set_msglevel = sis190_set_msglevel,
.nway_reset = sis190_nway_reset,
+ .get_link_ksettings = sis190_get_link_ksettings,
+ .set_link_ksettings = sis190_set_link_ksettings,
};
static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 1b6f6171d078..40bd88362e3d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2035,23 +2035,23 @@ static u32 sis900_get_link(struct net_device *net_dev)
return mii_link_ok(&sis_priv->mii_info);
}
-static int sis900_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int sis900_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
spin_lock_irq(&sis_priv->lock);
- mii_ethtool_gset(&sis_priv->mii_info, cmd);
+ mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return 0;
}
-static int sis900_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int sis900_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
int rt;
spin_lock_irq(&sis_priv->lock);
- rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
+ rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return rt;
}
@@ -2129,11 +2129,11 @@ static const struct ethtool_ops sis900_ethtool_ops = {
.get_msglevel = sis900_get_msglevel,
.set_msglevel = sis900_set_msglevel,
.get_link = sis900_get_link,
- .get_settings = sis900_get_settings,
- .set_settings = sis900_set_settings,
.nway_reset = sis900_nway_reset,
.get_wol = sis900_get_wol,
- .set_wol = sis900_set_wol
+ .set_wol = sis900_set_wol,
+ .get_link_ksettings = sis900_get_link_ksettings,
+ .set_link_ksettings = sis900_set_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 5f2737189c72..db6dcb06193d 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1387,25 +1387,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_gset(&np->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_sset(&np->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
@@ -1460,14 +1462,14 @@ static void ethtool_complete(struct net_device *dev)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
.begin = ethtool_begin,
- .complete = ethtool_complete
+ .complete = ethtool_complete,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 4f19c6166182..36307d34f641 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1446,40 +1446,40 @@ static int smc911x_close(struct net_device *dev)
* Ethtool support
*/
static int
-smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret, status;
unsigned long flags;
+ u32 supported;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_gset(&lp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
- cmd->supported = SUPPORTED_10baseT_Half |
+ supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
-
- cmd->autoneg = AUTONEG_DISABLE;
- if (lp->mii.phy_id==1)
- cmd->transceiver = XCVR_INTERNAL;
- else
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->port = 0;
+ cmd->base.speed = SPEED_100;
+
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = 0;
SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
- cmd->duplex =
+ cmd->base.duplex =
(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
DUPLEX_FULL : DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+
ret = 0;
}
@@ -1487,7 +1487,8 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}
static int
-smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1495,16 +1496,18 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_sset(&lp->mii, cmd);
+ ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
- if (cmd->autoneg != AUTONEG_DISABLE ||
- cmd->speed != SPEED_10 ||
- (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
- (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ if (cmd->base.autoneg != AUTONEG_DISABLE ||
+ cmd->base.speed != SPEED_10 ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL) ||
+ (cmd->base.port != PORT_TP &&
+ cmd->base.port != PORT_AUI))
return -EINVAL;
- lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+ lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
ret = 0;
}
@@ -1686,8 +1689,6 @@ static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
}
static const struct ethtool_ops smc911x_ethtool_ops = {
- .get_settings = smc911x_ethtool_getsettings,
- .set_settings = smc911x_ethtool_setsettings,
.get_drvinfo = smc911x_ethtool_getdrvinfo,
.get_msglevel = smc911x_ethtool_getmsglevel,
.set_msglevel = smc911x_ethtool_setmsglevel,
@@ -1698,6 +1699,8 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
.get_eeprom_len = smc911x_ethtool_geteeprom_len,
.get_eeprom = smc911x_ethtool_geteeprom,
.set_eeprom = smc911x_ethtool_seteeprom,
+ .get_link_ksettings = smc911x_ethtool_get_link_ksettings,
+ .set_link_ksettings = smc911x_ethtool_set_link_ksettings,
};
/*
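When there is no MII PHY (lp->phy_type == 0), smc911x fills the report by hand: a fixed speed from configuration, AUTONEG_DISABLE, duplex read back from the PHY-special register, and a supported mask converted into the bitmap. The removed maxtxpkt/maxrxpkt assignments have no ksettings equivalent and are simply dropped. A sketch of the fixed-link report with illustrative values:

    #include <linux/ethtool.h>

    /* Sketch: report a fixed 10 Mb/s half-duplex TP link without a PHY. */
    static int report_fixed_link(struct ethtool_link_ksettings *cmd)
    {
        u32 supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                        SUPPORTED_TP | SUPPORTED_AUI;

        cmd->base.speed = SPEED_10;
        cmd->base.duplex = DUPLEX_HALF;
        cmd->base.autoneg = AUTONEG_DISABLE;
        cmd->base.port = PORT_TP;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        return 0;
    }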
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 97280daba27f..976aa876789a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1843,56 +1843,60 @@ static int smc_link_ok(struct net_device *dev)
}
}
-static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
- u16 tmp;
- unsigned int ioaddr = dev->base_addr;
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
+ u32 supported;
- ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
-
- SMC_SELECT_BANK(1);
- tmp = inw(ioaddr + CONFIG);
- ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->phy_address = ioaddr + MGMT;
+ supported = (SUPPORTED_TP | SUPPORTED_AUI |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
- SMC_SELECT_BANK(0);
- tmp = inw(ioaddr + TCR);
- ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ SMC_SELECT_BANK(1);
+ tmp = inw(ioaddr + CONFIG);
+ ecmd->base.port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
+ ecmd->base.speed = SPEED_10;
+ ecmd->base.phy_address = ioaddr + MGMT;
- return 0;
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ ecmd->base.duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+
+ return 0;
}
-static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_netdev_set_ecmd(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
- u16 tmp;
- unsigned int ioaddr = dev->base_addr;
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
- if (ethtool_cmd_speed(ecmd) != SPEED_10)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
- return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
+ if (ecmd->base.speed != SPEED_10)
+ return -EINVAL;
+ if (ecmd->base.duplex != DUPLEX_HALF &&
+ ecmd->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->base.port != PORT_TP && ecmd->base.port != PORT_AUI)
+ return -EINVAL;
- if (ecmd->port == PORT_AUI)
- smc_set_xcvr(dev, 1);
- else
- smc_set_xcvr(dev, 0);
+ if (ecmd->base.port == PORT_AUI)
+ smc_set_xcvr(dev, 1);
+ else
+ smc_set_xcvr(dev, 0);
- SMC_SELECT_BANK(0);
- tmp = inw(ioaddr + TCR);
- if (ecmd->duplex == DUPLEX_FULL)
- tmp |= TCR_FDUPLX;
- else
- tmp &= ~TCR_FDUPLX;
- outw(tmp, ioaddr + TCR);
-
- return 0;
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ if (ecmd->base.duplex == DUPLEX_FULL)
+ tmp |= TCR_FDUPLX;
+ else
+ tmp &= ~TCR_FDUPLX;
+ outw(tmp, ioaddr + TCR);
+
+ return 0;
}
static int check_if_running(struct net_device *dev)
@@ -1908,7 +1912,8 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
@@ -1919,7 +1924,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_gset(&smc->mii_if, ecmd);
+ ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
@@ -1927,7 +1932,8 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return ret;
}
-static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
@@ -1938,7 +1944,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_sset(&smc->mii_if, ecmd);
+ ret = mii_ethtool_set_link_ksettings(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
@@ -1982,10 +1988,10 @@ static int smc_nway_reset(struct net_device *dev)
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = smc_get_drvinfo,
- .get_settings = smc_get_settings,
- .set_settings = smc_set_settings,
.get_link = smc_get_link,
.nway_reset = smc_nway_reset,
+ .get_link_ksettings = smc_get_link_ksettings,
+ .set_link_ksettings = smc_set_link_ksettings,
};
static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 04d9245b7149..9f0d26da6813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -324,6 +324,9 @@ struct dma_features {
unsigned int number_tx_queues;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
+ /* TX and RX FIFO sizes */
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -454,17 +457,32 @@ struct stmmac_ops {
/* Enable and verify that the IPC module is supported */
int (*rx_ipc)(struct mac_device_info *hw);
/* Enable RX Queues */
- void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
+ void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
+ /* Program RX Algorithms */
+ void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
+ /* Program TX Algorithms */
+ void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
+ /* Set MTL TX queues weight */
+ void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+ u32 weight, u32 queue);
+ /* RX MTL queue to RX dma mapping */
+ void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
+ /* Configure AV Algorithm */
+ void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
+ u32 idle_slope, u32 high_credit, u32 low_credit,
+ u32 queue);
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
/* Handle extra events on specific interrupts hw dependent */
int (*host_irq_status)(struct mac_device_info *hw,
struct stmmac_extra_stats *x);
+ /* Handle MTL interrupts */
+ int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
/* Multicast filter setting */
void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
/* Flow control setting */
void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time);
+ unsigned int fc, unsigned int pause_time, u32 tx_cnt);
/* Set power management mode (e.g. magic frame) */
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
/* Set/Get Unicast MAC addresses */
@@ -477,7 +495,8 @@ struct stmmac_ops {
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+ void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues);
/* PCS calls */
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
bool loopback);
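The stmmac core header grows several per-queue MTL callbacks (scheduling algorithms, queue weights, RX-queue-to-DMA mapping, CBS) plus TX/RX FIFO sizes in dma_features. Core code typically invokes such ops only when the MAC backend fills them in; the sketch below shows that guarded-call pattern with an invented op and types, not the actual stmmac call sites.

    #include <linux/types.h>

    struct example_ops {
        void (*set_tx_queue_weight)(void *hw, u32 weight, u32 queue);
    };

    /* Sketch: call an optional per-queue callback only if provided. */
    static void configure_weights(const struct example_ops *ops, void *hw,
                                  const u32 *weights, u32 count)
    {
        u32 q;

        if (!ops->set_tx_queue_weight)
            return;

        for (q = 0; q < count; q++)
            ops->set_tx_queue_weight(hw, weights[q], q);
    }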
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 1a3fa3d9f855..dd6a2f9791cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -14,16 +14,34 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/stmmac.h>
#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+struct tegra_eqos {
+ struct device *dev;
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_master;
+ struct clk *clk_slave;
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+
+ struct gpio_desc *reset;
+};
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return 0;
}
+static void *dwc_qos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ int err;
+
+ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(plat_dat->stmmac_clk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ return ERR_CAST(plat_dat->stmmac_clk);
+ }
+
+ err = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(plat_dat->pclk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ err = PTR_ERR(plat_dat->pclk);
+ goto disable;
+ }
+
+ err = clk_prepare_enable(plat_dat->pclk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
+ err);
+ goto disable;
+ }
+
+ return NULL;
+
+disable:
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ERR_PTR(err);
+}
+
+static int dwc_qos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+#define SDMEMCOMPPADCTRL 0x8800
+#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
+
+#define AUTO_CAL_CONFIG 0x8804
+#define AUTO_CAL_CONFIG_START BIT(31)
+#define AUTO_CAL_CONFIG_ENABLE BIT(29)
+
+#define AUTO_CAL_STATUS 0x880c
+#define AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate = 125000000;
+ bool needs_calibration = false;
+ u32 value;
+ int err;
+
+ switch (speed) {
+ case SPEED_1000:
+ needs_calibration = true;
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ needs_calibration = true;
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(eqos->dev, "invalid speed %u\n", speed);
+ break;
+ }
+
+ if (needs_calibration) {
+ /* calibrate */
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ udelay(1);
+
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ value & AUTO_CAL_STATUS_ACTIVE,
+ 1, 10);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration did not start\n");
+ goto failed;
+ }
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ (value & AUTO_CAL_STATUS_ACTIVE) == 0,
+ 20, 200);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration didn't finish\n");
+ goto failed;
+ }
+
+ failed:
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+ } else {
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value &= ~AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+ }
+
+ err = clk_set_rate(eqos->clk_tx, rate);
+ if (err < 0)
+ dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
+}
+
+static int tegra_eqos_init(struct platform_device *pdev, void *priv)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate;
+ u32 value;
+
+ rate = clk_get_rate(eqos->clk_slave);
+
+ value = (rate / 1000000) - 1;
+ writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
+
+ return 0;
+}
+
+static void *tegra_eqos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res)
+{
+ struct tegra_eqos *eqos;
+ int err;
+
+ eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
+ if (!eqos) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ eqos->dev = &pdev->dev;
+ eqos->regs = res->addr;
+
+ eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
+ if (IS_ERR(eqos->clk_master)) {
+ err = PTR_ERR(eqos->clk_master);
+ goto error;
+ }
+
+ err = clk_prepare_enable(eqos->clk_master);
+ if (err < 0)
+ goto error;
+
+ eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
+ if (IS_ERR(eqos->clk_slave)) {
+ err = PTR_ERR(eqos->clk_slave);
+ goto disable_master;
+ }
+
+ data->stmmac_clk = eqos->clk_slave;
+
+ err = clk_prepare_enable(eqos->clk_slave);
+ if (err < 0)
+ goto disable_master;
+
+ eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(eqos->clk_rx)) {
+ err = PTR_ERR(eqos->clk_rx);
+ goto disable_slave;
+ }
+
+ err = clk_prepare_enable(eqos->clk_rx);
+ if (err < 0)
+ goto disable_slave;
+
+ eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(eqos->clk_tx)) {
+ err = PTR_ERR(eqos->clk_tx);
+ goto disable_rx;
+ }
+
+ err = clk_prepare_enable(eqos->clk_tx);
+ if (err < 0)
+ goto disable_rx;
+
+ eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(eqos->reset)) {
+ err = PTR_ERR(eqos->reset);
+ goto disable_tx;
+ }
+
+ usleep_range(2000, 4000);
+ gpiod_set_value(eqos->reset, 0);
+
+ eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
+ if (IS_ERR(eqos->rst)) {
+ err = PTR_ERR(eqos->rst);
+ goto reset_phy;
+ }
+
+ err = reset_control_assert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_deassert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ data->fix_mac_speed = tegra_eqos_fix_speed;
+ data->init = tegra_eqos_init;
+ data->bsp_priv = eqos;
+
+ err = tegra_eqos_init(pdev, eqos);
+ if (err < 0)
+ goto reset;
+
+out:
+ return eqos;
+
+reset:
+ reset_control_assert(eqos->rst);
+reset_phy:
+ gpiod_set_value(eqos->reset, 1);
+disable_tx:
+ clk_disable_unprepare(eqos->clk_tx);
+disable_rx:
+ clk_disable_unprepare(eqos->clk_rx);
+disable_slave:
+ clk_disable_unprepare(eqos->clk_slave);
+disable_master:
+ clk_disable_unprepare(eqos->clk_master);
+error:
+ eqos = ERR_PTR(err);
+ goto out;
+}
+
+static int tegra_eqos_remove(struct platform_device *pdev)
+{
+ struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
+
+ reset_control_assert(eqos->rst);
+ gpiod_set_value(eqos->reset, 1);
+ clk_disable_unprepare(eqos->clk_tx);
+ clk_disable_unprepare(eqos->clk_rx);
+ clk_disable_unprepare(eqos->clk_slave);
+ clk_disable_unprepare(eqos->clk_master);
+
+ return 0;
+}
+
+struct dwc_eth_dwmac_data {
+ void *(*probe)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res);
+ int (*remove)(struct platform_device *pdev);
+};
+
+static const struct dwc_eth_dwmac_data dwc_qos_data = {
+ .probe = dwc_qos_probe,
+ .remove = dwc_qos_remove,
+};
+
+static const struct dwc_eth_dwmac_data tegra_eqos_data = {
+ .probe = tegra_eqos_probe,
+ .remove = tegra_eqos_remove,
+};
+
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
{
+ const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct resource *res;
+ void *priv;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+
memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
/**
@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- ret = PTR_ERR(plat_dat->stmmac_clk);
- plat_dat->stmmac_clk = NULL;
- goto err_remove_config_dt;
- }
- clk_prepare_enable(plat_dat->stmmac_clk);
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- ret = PTR_ERR(plat_dat->pclk);
- plat_dat->pclk = NULL;
- goto err_out_clk_dis_phy;
+ priv = data->probe(pdev, plat_dat, &stmmac_res);
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
+ dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
+ goto remove_config;
}
- clk_prepare_enable(plat_dat->pclk);
ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
if (ret)
- goto err_out_clk_dis_aper;
+ goto remove;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_out_clk_dis_aper;
+ goto remove;
- return 0;
+ return ret;
-err_out_clk_dis_aper:
- clk_disable_unprepare(plat_dat->pclk);
-err_out_clk_dis_phy:
- clk_disable_unprepare(plat_dat->stmmac_clk);
-err_remove_config_dt:
+remove:
+ data->remove(pdev);
+remove_config:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
@@ -178,11 +479,29 @@ err_remove_config_dt:
static int dwc_eth_dwmac_remove(struct platform_device *pdev)
{
- return stmmac_pltfr_remove(pdev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct dwc_eth_dwmac_data *data;
+ int err;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ err = stmmac_dvr_remove(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = data->remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
- { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
+ { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
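The glue driver is restructured around per-compatible probe/remove hooks: of_device_get_match_data() returns a dwc_eth_dwmac_data for the matched compatible, its probe hook returns a bsp_priv pointer or ERR_PTR, and the remove hook undoes it on failure or unbind. A condensed sketch of that dispatch, with invented struct and function names and the surrounding driver calls elided:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct variant_data {                       /* per-compatible hooks */
        void *(*probe)(struct platform_device *pdev);
        int (*remove)(struct platform_device *pdev);
    };

    static int example_probe(struct platform_device *pdev)
    {
        const struct variant_data *data;
        void *priv;

        data = of_device_get_match_data(&pdev->dev);
        if (!data)
            return -EINVAL;

        priv = data->probe(pdev);
        if (IS_ERR(priv))
            return PTR_ERR(priv);

        /* ... common probing continues here; on failure, call
         * data->remove(pdev) before returning the error ...
         */
        return 0;
    }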
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 19b9b3087099..7f78f7746a5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
/* Set flow such that DZPQ in Mac Register 6 is 0,
@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
{
u32 value = readl(ioaddr + GMAC_DEBUG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index e370ccec6176..524135e6dd89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
}
static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index db45134fddf0..54bcdb4d10db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -25,6 +25,7 @@
#define GMAC_RXQ_CTRL0 0x000000a0
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
+#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -148,6 +149,8 @@ enum power_event {
/* MAC HW features1 bitmap */
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
+#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
/* MAC HW features2 bitmap */
#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
@@ -161,8 +164,25 @@ enum power_event {
#define GMAC_HI_REG_AE BIT(31)
/* MTL registers */
+#define MTL_OPERATION_MODE 0x00000c00
+#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
+#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
+#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
+#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
+#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
+#define MTL_OPERATION_RAA BIT(2)
+#define MTL_OPERATION_RAA_SP (0x0 << 2)
+#define MTL_OPERATION_RAA_WSP (0x1 << 2)
+
#define MTL_INT_STATUS 0x00000c20
-#define MTL_INT_Q0 BIT(0)
+#define MTL_INT_QX(x) BIT(x)
+
+#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
+#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
+#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
+#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
+#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
+#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
#define MTL_CHAN_BASE_ADDR 0x00000d00
#define MTL_CHAN_BASE_OFFSET 0x40
@@ -180,6 +200,7 @@ enum power_event {
#define MTL_OP_MODE_TSF BIT(1)
#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
+#define MTL_OP_MODE_TQS_SHIFT 16
#define MTL_OP_MODE_TTC_MASK 0x70
#define MTL_OP_MODE_TTC_SHIFT 4
@@ -193,6 +214,17 @@ enum power_event {
#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
+#define MTL_OP_MODE_RQS_SHIFT 20
+
+#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
+#define MTL_OP_MODE_RFD_SHIFT 14
+
+#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
+#define MTL_OP_MODE_RFA_SHIFT 8
+
+#define MTL_OP_MODE_EHFC BIT(7)
+
#define MTL_OP_MODE_RTC_MASK 0x18
#define MTL_OP_MODE_RTC_SHIFT 3
@@ -201,6 +233,46 @@ enum power_event {
#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+/* MTL ETS Control register */
+#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
+#define MTL_ETS_CTRL_BASE_OFFSET 0x40
+#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
+ ((x) * MTL_ETS_CTRL_BASE_OFFSET))
+
+#define MTL_ETS_CTRL_CC BIT(3)
+#define MTL_ETS_CTRL_AVALG BIT(2)
+
+/* MTL Queue Quantum Weight */
+#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
+#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
+#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
+ ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
+#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
+
+/* MTL sendSlopeCredit register */
+#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
+#define MTL_SEND_SLP_CRED_OFFSET 0x40
+#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
+ ((x) * MTL_SEND_SLP_CRED_OFFSET))
+
+#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
+
+/* MTL hiCredit register */
+#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
+#define MTL_HIGH_CRED_OFFSET 0x40
+#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
+ ((x) * MTL_HIGH_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
+
+/* MTL loCredit register */
+#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
+#define MTL_LOW_CRED_OFFSET 0x40
+#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
+ ((x) * MTL_LOW_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
+
/* MTL debug */
#define MTL_DEBUG_TXSTSFSTS BIT(5)
#define MTL_DEBUG_TXFSTS BIT(4)
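
All of the per-queue MTL registers introduced above share one addressing scheme: a fixed base address plus a 0x40 stride per queue. A minimal stand-alone sketch of that arithmetic, reusing the ETS control macros from this hunk (the other *_BASE_ADDR(x) helpers expand the same way):

#include <stdio.h>

/* Copied from the dwmac4.h hunk above. */
#define MTL_ETS_CTRL_BASE_ADDR		0x00000d10
#define MTL_ETS_CTRL_BASE_OFFSET	0x40
#define MTL_ETSX_CTRL_BASE_ADDR(x)	(MTL_ETS_CTRL_BASE_ADDR + \
					 ((x) * MTL_ETS_CTRL_BASE_OFFSET))

int main(void)
{
	unsigned int q;

	/* Queue 0 -> 0x0d10, queue 1 -> 0x0d50, queue 2 -> 0x0d90, ... */
	for (q = 0; q < 4; q++)
		printf("MTL ETS control, queue %u: 0x%04x\n",
		       q, (unsigned int)MTL_ETSX_CTRL_BASE_ADDR(q));
	return 0;
}
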
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 1e79e6529c4a..10599dbc232f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -59,17 +59,143 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
writel(value, ioaddr + GMAC_INT_EN);
}
-static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
+ u8 mode, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
value &= GMAC_RX_QUEUE_CLEAR(queue);
- value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
writel(value, ioaddr + GMAC_RXQ_CTRL0);
}
+static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_RAA;
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ value |= MTL_OPERATION_RAA_SP;
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= MTL_OPERATION_RAA_WSP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_SCHALG_MASK;
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= MTL_OPERATION_SCHALG_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= MTL_OPERATION_SCHALG_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= MTL_OPERATION_SCHALG_DWRR;
+ break;
+ case MTL_TX_ALGORITHM_SP:
+ value |= MTL_OPERATION_SCHALG_SP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+
+ value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
+ value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
+ writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+}
+
+static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (queue < 4)
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
+
+ if (queue == 0 || queue == 4) {
+ value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
+ value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
+ }
+
+ if (queue < 4)
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
+}
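
dwmac4_map_mtl_dma() above packs one small field per RX queue into MTL_RXQ_DMA_MAP0 (queues 0 to 3) or MTL_RXQ_DMA_MAP1 (queues 4 to 7). A stand-alone sketch of the field placement for a one-to-one queue/channel mapping, using the macros from the dwmac4.h hunk (GENMASK is redefined locally so the snippet builds outside the kernel):

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* Copied from the dwmac4.h hunk above. */
#define MTL_RXQ_DMA_Q04MDMACH_MASK	GENMASK(3, 0)
#define MTL_RXQ_DMA_Q04MDMACH(x)	((x) << 0)
#define MTL_RXQ_DMA_QXMDMACH_MASK(x)	GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
#define MTL_RXQ_DMA_QXMDMACH(chan, q)	((chan) << (8 * (q)))

int main(void)
{
	unsigned int value = 0, queue, chan;

	/* 1:1 mapping of RX queues 0-3 onto DMA channels 0-3 */
	for (queue = 0; queue < 4; queue++) {
		chan = queue;
		if (queue == 0) {
			value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
			value |= MTL_RXQ_DMA_Q04MDMACH(chan);
		} else {
			value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
			value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
		}
	}

	/* Channel q lands in bits [8q+3:8q], so this prints 0x03020100 */
	printf("MTL_RXQ_DMA_MAP0 = 0x%08x\n", value);
	return 0;
}
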
+
+static void dwmac4_config_cbs(struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope,
+ u32 high_credit, u32 low_credit, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
+ pr_debug("\tsend_slope: 0x%08x\n", send_slope);
+ pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
+ pr_debug("\thigh_credit: 0x%08x\n", high_credit);
+ pr_debug("\tlow_credit: 0x%08x\n", low_credit);
+
+ /* enable AV algorithm */
+ value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+ value |= MTL_ETS_CTRL_AVALG;
+ value |= MTL_ETS_CTRL_CC;
+ writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+
+ /* configure send slope */
+ value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
+ value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
+ writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+
+ /* configure idle slope (same register as tx weight) */
+ dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
+
+ /* configure high credit */
+ value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_HIGH_CRED_HC_MASK;
+ value |= high_credit & MTL_HIGH_CRED_HC_MASK;
+ writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+
+ /* configure low credit */
+ value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_HIGH_CRED_LC_MASK;
+ value |= low_credit & MTL_HIGH_CRED_LC_MASK;
+ writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+}
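
Each CBS parameter above is programmed with the same read-modify-write pattern: clear the field mask, then OR in the new value masked to the field width. A stand-alone illustration with the sendSlopeCredit field, where a plain variable stands in for the MMIO register and GENMASK is redefined locally (the 0x1c77 slope is a placeholder, not a recommended setting):

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* Copied from the dwmac4.h hunk above. */
#define MTL_SEND_SLP_CRED_SSC_MASK	GENMASK(13, 0)

int main(void)
{
	/* Pretend the register already holds some other/reserved bits. */
	unsigned int reg = 0xabcd4321;
	unsigned int send_slope = 0x1c77;	/* placeholder value */

	reg &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	reg |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;

	/* Bits above 13 are preserved: 0xabcd4321 -> 0xabcd5c77 */
	printf("register after update: 0x%08x\n", reg);
	return 0;
}
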
+
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
void __iomem *ioaddr = hw->pcsr;
@@ -251,11 +377,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
- u32 channel = STMMAC_CHAN0; /* FIXME */
unsigned int flow = 0;
+ u32 queue = 0;
pr_debug("GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
@@ -265,13 +392,18 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
if (fc & FLOW_TX) {
pr_debug("\tTransmit Flow-Control ON\n");
- flow |= GMAC_TX_FLOW_CTRL_TFE;
- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
- if (duplex) {
+ if (duplex)
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
- flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ flow |= GMAC_TX_FLOW_CTRL_TFE;
+
+ if (duplex)
+ flow |=
+ (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
}
}
@@ -325,11 +457,34 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
}
}
+static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ int ret = 0;
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+
+ /* Check MTL Interrupt */
+ if (mtl_int_qx_status & MTL_INT_QX(chan)) {
+ /* read Queue x Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(chan));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ return ret;
+}
+
static int dwmac4_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
- u32 mtl_int_qx_status;
u32 intr_status;
int ret = 0;
@@ -348,20 +503,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
- mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
- /* Check MTL Interrupt: Currently only one queue is used: Q0. */
- if (mtl_int_qx_status & MTL_INT_Q0) {
- /* read Queue 0 Interrupt status */
- u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
-
- if (status & MTL_RX_OVERFLOW_INT) {
- /* clear Interrupt */
- writel(status | MTL_RX_OVERFLOW_INT,
- ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
- ret = CORE_IRQ_MTL_RX_OVERFLOW;
- }
- }
-
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
if (intr_status & PCS_RGSMIIIS_IRQ)
dwmac4_phystatus(ioaddr, x);
@@ -369,64 +510,69 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
{
u32 value;
-
- /* Currently only channel 0 is supported */
- value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
-
- if (value & MTL_DEBUG_TXSTSFSTS)
- x->mtl_tx_status_fifo_full++;
- if (value & MTL_DEBUG_TXFSTS)
- x->mtl_tx_fifo_not_empty++;
- if (value & MTL_DEBUG_TWCSTS)
- x->mmtl_fifo_ctrl++;
- if (value & MTL_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
- >> MTL_DEBUG_TRCSTS_SHIFT;
- if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
- x->mtl_tx_fifo_read_ctrl_write++;
- else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
- x->mtl_tx_fifo_read_ctrl_wait++;
- else if (trcsts == MTL_DEBUG_TRCSTS_READ)
- x->mtl_tx_fifo_read_ctrl_read++;
- else
- x->mtl_tx_fifo_read_ctrl_idle++;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
}
- if (value & MTL_DEBUG_TXPAUSED)
- x->mac_tx_in_pause++;
- value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+ for (queue = 0; queue < rx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
- if (value & MTL_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
- >> MTL_DEBUG_RRCSTS_SHIFT;
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+ >> MTL_DEBUG_RRCSTS_SHIFT;
- if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
- x->mtl_rx_fifo_fill_level_full++;
- else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
- x->mtl_rx_fifo_fill_above_thresh++;
- else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
- x->mtl_rx_fifo_fill_below_thresh++;
- else
- x->mtl_rx_fifo_fill_level_empty++;
- }
- if (value & MTL_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
- MTL_DEBUG_RRCSTS_SHIFT;
-
- if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
- x->mtl_rx_fifo_read_ctrl_flush++;
- else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
- x->mtl_rx_fifo_read_ctrl_read_data++;
- else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
- x->mtl_rx_fifo_read_ctrl_status++;
- else
- x->mtl_rx_fifo_read_ctrl_idle++;
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
}
- if (value & MTL_DEBUG_RWCSTS)
- x->mtl_rx_fifo_ctrl_active++;
/* GMAC debug */
value = readl(ioaddr + GMAC_DEBUG);
@@ -457,8 +603,14 @@ static const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
.dump_regs = dwmac4_dump_regs,
.host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
.flow_ctrl = dwmac4_flow_ctrl,
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index f97b0d5d9987..6ac6b2600a7c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -183,8 +183,9 @@ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
}
static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
- int rxmode, u32 channel)
+ int rxmode, u32 channel, int rxfifosz)
{
+ unsigned int rqs = rxfifosz / 256 - 1;
u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
/* Following code only done for channel 0, other channels not yet
@@ -250,6 +251,53 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
mtl_rx_op |= MTL_OP_MODE_RTC_128;
}
+ mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
+ mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+
+ /* enable flow control only if each channel gets 4 KiB or more FIFO */
+ if (rxfifosz >= 4096) {
+ unsigned int rfd, rfa;
+
+ mtl_rx_op |= MTL_OP_MODE_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (rxfifosz) {
+ case 4096:
+ /* This violates the above thresholds because of the FIFO
+ * size limit, so an overflow may still occur despite flow
+ * control being enabled.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ case 8192:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x0a; /* Full-6K */
+ break;
+
+ case 16384:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ break;
+
+ default:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x1e; /* Full-16K */
+ break;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
+ mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
+
+ mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
+ mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+ }
+
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
/* Enable MTL RX overflow */
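
Going by the inline comments in the switch above ("Full-1.5K", "Full-4K", ...), the RFA/RFD encodings appear to step in 512-byte units starting at 1 KiB below full, while RQS encodes the RX queue size in 256-byte blocks minus one. A quick stand-alone check of that reading against the 8 KiB case:

#include <stdio.h>

/* "Full minus X" offset implied by an RFA/RFD encoding; the 512-byte step
 * starting at 1 KiB is inferred from the inline comments above, not taken
 * from the databook.
 */
static unsigned int full_minus_bytes(unsigned int enc)
{
	return 1024 + enc * 512;
}

int main(void)
{
	unsigned int fifosz = 8192;
	unsigned int rqs = fifosz / 256 - 1;

	printf("RQS for a %u-byte RX queue: %u\n", fifosz, rqs); /* 31 */
	printf("RFD 0x06 -> full - %u bytes\n", full_minus_bytes(0x06));
	printf("RFA 0x0a -> full - %u bytes\n", full_minus_bytes(0x0a));
	return 0;
}
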
@@ -262,7 +310,7 @@ static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode, int rxfifosz)
{
/* Only Channel 0 is actually configured and used */
- dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+ dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0, rxfifosz);
}
static void dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -294,6 +342,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
+ * shifting and store the sizes in bytes.
+ */
+ dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
/* MAC HW feature2 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
/* TX and RX number of channels */
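
The FIFO size fields in HW_FEATURE1 follow the encoding noted in the comment above, size = 128 << field, so a field value of 5 corresponds to 4 KiB and 8 to 32 KiB. A stand-alone decode of a sample register value (the sample value itself is made up for illustration):

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* Copied from the dwmac4.h hunk above. */
#define GMAC_HW_TXFIFOSIZE	GENMASK(10, 6)
#define GMAC_HW_RXFIFOSIZE	GENMASK(4, 0)

int main(void)
{
	/* Made-up HW_FEATURE1 value: TX field = 7, RX field = 8 */
	unsigned int hw_cap = (7 << 6) | 8;
	unsigned int tx_fifo = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
	unsigned int rx_fifo = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);

	/* 128 << 7 = 16384 bytes, 128 << 8 = 32768 bytes */
	printf("TX FIFO %u bytes, RX FIFO %u bytes\n", tx_fifo, rx_fifo);
	return 0;
}
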
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 85d64114e159..61b9369a041e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
struct phy_device *phy = netdev->phydev;
int new_pause = FLOW_OFF;
@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
- priv->pause);
+ priv->pause, tx_cnt);
return 0;
}
@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
int i, j = 0;
/* Update the DMA HW counters for dwmac10/100 */
@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
if ((priv->hw->mac->debug) &&
(priv->synopsys_id >= DWMAC_CORE_3_50))
priv->hw->mac->debug(priv->ioaddr,
- (void *)&priv->xstats);
+ (void *)&priv->xstats,
+ rx_queues_count, tx_queues_count);
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4498a3861aa3..ec363e19cd79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -673,6 +673,19 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
}
/**
+ * stmmac_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * @duplex: duplex mode negotiated by the PHY
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+ priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
+}
+
+/**
* stmmac_adjust_link - adjusts the link parameters
* @dev: net device structure
* Description: this is the helper called by the physical abstraction layer
@@ -687,7 +700,6 @@ static void stmmac_adjust_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
- unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
if (!phydev)
return;
@@ -709,8 +721,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/* Flow Control operation */
if (phydev->pause)
- priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
- fc, pause_time);
+ stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) {
new_state = 1;
@@ -1256,19 +1267,14 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
- int rx_count = priv->dma_cap.number_rx_queues;
- int queue = 0;
-
- /* If GMAC does not have multiple queues, then this is not necessary*/
- if (rx_count == 1)
- return;
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
- /**
- * If the core is synthesized with multiple rx queues / multiple
- * dma channels, then rx queues will be disabled by default.
- * For now only rx queue 0 is enabled.
- */
- priv->hw->mac->rx_queue_enable(priv->hw, queue);
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+ }
}
/**
@@ -1281,6 +1287,9 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
int rxfifosz = priv->plat->rx_fifo_size;
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+
if (priv->plat->force_thresh_dma_mode)
priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
@@ -1446,6 +1455,9 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
int status;
int rxfifosz = priv->plat->rx_fifo_size;
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+
status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
if (likely((status & handle_rx)) || (status & handle_tx)) {
if (likely(napi_schedule_prep(&priv->napi))) {
@@ -1645,6 +1657,101 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
}
/**
+ * stmmac_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting the weight of each TX queue
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 weight;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ weight = priv->plat->tx_queues_cfg[queue].weight;
+ priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+ }
+}
+
+/**
+ * stmmac_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 mode_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB)
+ continue;
+
+ priv->hw->mac->config_cbs(priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ }
+}
+
+/**
+ * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 chan;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ chan = priv->plat->rx_queues_cfg[queue].chan;
+ priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+ }
+}
+
+/**
+ * stmmac_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring the MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+ if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+ stmmac_set_tx_queue_weight(priv);
+
+ /* Configure MTL RX algorithms */
+ if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
+ priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
+ priv->plat->rx_sched_algorithm);
+
+ /* Configure MTL TX algorithms */
+ if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
+ priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
+ priv->plat->tx_sched_algorithm);
+
+ /* Configure CBS in AVB TX queues */
+ if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+ stmmac_configure_cbs(priv);
+
+ /* Map RX MTL to DMA channels */
+ if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
+ stmmac_rx_queue_dma_chan_map(priv);
+
+ /* Enable MAC RX Queues */
+ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
+ stmmac_mac_enable_rx_queues(priv);
+}
+
+/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
* Description:
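
stmmac_mtl_configuration() and its helpers are driven entirely by the new per-queue fields in the platform data, which stmmac_mtl_setup() fills from the device tree further down. As a rough sketch, not part of this patch, a platform wanting two DCB queues in each direction would end up with values along these lines (field and constant names mirror the accesses above; the numbers are illustrative):

/* Hypothetical glue-driver configuration consumed by
 * stmmac_mtl_configuration(); shown for illustration only.
 */
plat->rx_queues_to_use = 2;
plat->tx_queues_to_use = 2;
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;

plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
plat->rx_queues_cfg[0].chan = 0;
plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_DCB;
plat->rx_queues_cfg[1].chan = 1;

plat->tx_queues_cfg[0].weight = 0x10;
plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[1].weight = 0x11;
plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_DCB;
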
@@ -1688,9 +1795,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
- /* Initialize MAC RX Queues */
- if (priv->hw->mac->rx_queue_enable)
- stmmac_mac_enable_rx_queues(priv);
+ /* Initialize MTL */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_mtl_configuration(priv);
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
@@ -1711,6 +1818,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
if (init_ptp) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
+
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -1754,6 +1865,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
return 0;
}
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -1821,7 +1939,7 @@ static int stmmac_open(struct net_device *dev)
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
- goto init_error;
+ goto irq_error;
}
/* Request the Wake IRQ in case of another line is used for WoL */
@@ -1858,7 +1976,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
+irq_error:
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+ del_timer_sync(&priv->txtimer);
+ stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
@@ -2063,6 +2186,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
stmmac_tso_allocator(priv, des, skb_frag_size(frag),
(i == nfrags - 1));
@@ -2808,6 +2933,11 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ status |= priv->hw->mac->host_mtl_irq_status(priv->hw,
+ STMMAC_CHAN0);
+
if (unlikely(status)) {
/* For LPI we need to save the tx status */
if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 5c9e462276b9..cea472a7c335 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -88,6 +88,10 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
}
static int quark_default_data(struct plat_stmmacenet_data *plat,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 433a84239a68..37f550ae76a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
if (!np)
return NULL;
- axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
if (!axi) {
of_node_put(np);
return ERR_PTR(-ENOMEM);
@@ -132,6 +132,118 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
}
/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ */
+static void stmmac_mtl_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *q_node;
+ struct device_node *rx_node;
+ struct device_node *tx_node;
+ u8 queue = 0;
+
+ rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+ if (!rx_node)
+ return;
+
+ tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+ if (!tx_node) {
+ of_node_put(rx_node);
+ return;
+ }
+
+ /* Processing RX queues common config */
+ if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use))
+ plat->rx_queues_to_use = 1;
+
+ if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+ else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+ else
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ /* Processing individual RX queue config */
+ for_each_child_of_node(rx_node, q_node) {
+ if (queue >= plat->rx_queues_to_use)
+ break;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ else
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+ if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan))
+ plat->rx_queues_cfg[queue].chan = queue;
+ /* TODO: Dynamic mapping to be included in the future */
+
+ queue++;
+ }
+
+ /* Processing TX queues common config */
+ if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use))
+ plat->tx_queues_to_use = 1;
+
+ if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+ else
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+ queue = 0;
+
+ /* Processing individual TX queue config */
+ for_each_child_of_node(tx_node, q_node) {
+ if (queue >= plat->tx_queues_to_use)
+ break;
+
+ if (of_property_read_u8(q_node, "snps,weight",
+ &plat->tx_queues_cfg[queue].weight))
+ plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ } else if (of_property_read_bool(q_node,
+ "snps,avb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+ /* Credit Based Shaper parameters used by AVB */
+ if (of_property_read_u32(q_node, "snps,send_slope",
+ &plat->tx_queues_cfg[queue].send_slope))
+ plat->tx_queues_cfg[queue].send_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,idle_slope",
+ &plat->tx_queues_cfg[queue].idle_slope))
+ plat->tx_queues_cfg[queue].idle_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,high_credit",
+ &plat->tx_queues_cfg[queue].high_credit))
+ plat->tx_queues_cfg[queue].high_credit = 0x0;
+ if (of_property_read_u32(q_node, "snps,low_credit",
+ &plat->tx_queues_cfg[queue].low_credit))
+ plat->tx_queues_cfg[queue].low_credit = 0x0;
+ } else {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
+
+ queue++;
+ }
+
+ of_node_put(rx_node);
+ of_node_put(tx_node);
+ of_node_put(q_node);
+}
+
+/**
* stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
* @plat: driver data platform structure
* @np: device tree node
@@ -340,6 +452,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->axi = stmmac_axi_setup(pdev);
+ stmmac_mtl_setup(pdev, plat);
+
/* clock setup */
plat->stmmac_clk = devm_clk_get(&pdev->dev,
STMMAC_RESOURCE_NAME);
@@ -359,13 +473,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
clk_prepare_enable(plat->pclk);
/* Fall-back to main clock in case of no PTP ref is passed */
- plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
if (IS_ERR(plat->clk_ptp_ref)) {
plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
plat->clk_ptp_ref = NULL;
dev_warn(&pdev->dev, "PTP uses main clock\n");
} else {
- clk_prepare_enable(plat->clk_ptp_ref);
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0e8e89f17dbb..382993c1561c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -691,7 +691,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
}
/* Must be invoked under cp->lock */
-static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+static void cas_begin_auto_negotiation(struct cas *cp,
+ const struct ethtool_link_ksettings *ep)
{
u16 ctl;
#if 1
@@ -704,16 +705,16 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
if (!ep)
goto start_aneg;
lcntl = cp->link_cntl;
- if (ep->autoneg == AUTONEG_ENABLE)
+ if (ep->base.autoneg == AUTONEG_ENABLE) {
cp->link_cntl = BMCR_ANENABLE;
- else {
- u32 speed = ethtool_cmd_speed(ep);
+ } else {
+ u32 speed = ep->base.speed;
cp->link_cntl = 0;
if (speed == SPEED_100)
cp->link_cntl |= BMCR_SPEED100;
else if (speed == SPEED_1000)
cp->link_cntl |= CAS_BMCR_SPEED1000;
- if (ep->duplex == DUPLEX_FULL)
+ if (ep->base.duplex == DUPLEX_FULL)
cp->link_cntl |= BMCR_FULLDPLX;
}
#if 1
@@ -4528,19 +4529,21 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
-static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct cas *cp = netdev_priv(dev);
u16 bmcr;
int full_duplex, speed, pause;
unsigned long flags;
enum link_state linkstate = link_up;
+ u32 supported, advertising;
- cmd->advertising = 0;
- cmd->supported = SUPPORTED_Autoneg;
+ advertising = 0;
+ supported = SUPPORTED_Autoneg;
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
- cmd->supported |= SUPPORTED_1000baseT_Full;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
/* Record PHY settings if HW is on. */
@@ -4548,17 +4551,15 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bmcr = 0;
linkstate = cp->lstate;
if (CAS_PHY_MII(cp->phy_type)) {
- cmd->port = PORT_MII;
- cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
- cmd->phy_address = cp->phy_addr;
- cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = cp->phy_addr;
+ advertising |= ADVERTISED_TP | ADVERTISED_MII |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->supported |=
+ supported |=
(SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -4574,11 +4575,10 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
} else {
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = 0;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
if (cp->hw_running) {
/* pcs uses the same bits as mii */
@@ -4590,21 +4590,20 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_unlock_irqrestore(&cp->lock, flags);
if (bmcr & BMCR_ANENABLE) {
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->autoneg = AUTONEG_ENABLE;
- ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.speed = ((speed == 10) ?
SPEED_10 :
((speed == 1000) ?
- SPEED_1000 : SPEED_100)));
- cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ SPEED_1000 : SPEED_100));
+ cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
SPEED_1000 :
((bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10)));
- cmd->duplex =
- (bmcr & BMCR_FULLDPLX) ?
+ SPEED_100 : SPEED_10));
+ cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
if (linkstate != link_up) {
@@ -4619,39 +4618,46 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* settings that we configured.
*/
if (cp->link_cntl & BMCR_ANENABLE) {
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
if (cp->link_cntl & BMCR_SPEED100) {
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
}
- cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
}
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct cas *cp = netdev_priv(dev);
unsigned long flags;
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
@@ -4753,8 +4759,6 @@ static void cas_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops cas_ethtool_ops = {
.get_drvinfo = cas_get_drvinfo,
- .get_settings = cas_get_settings,
- .set_settings = cas_set_settings,
.nway_reset = cas_nway_reset,
.get_link = cas_get_link,
.get_msglevel = cas_get_msglevel,
@@ -4764,6 +4768,8 @@ static const struct ethtool_ops cas_ethtool_ops = {
.get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
+ .get_link_ksettings = cas_get_link_ksettings,
+ .set_link_ksettings = cas_set_link_ksettings,
};
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
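
The cassini conversion above, like the niu/sungem/sunhme ones that follow, keeps building the capability masks as legacy u32 bitmasks and only converts them to link-mode bitmaps at the very end via ethtool_convert_legacy_u32_to_link_mode(). A minimal sketch of that pattern for a hypothetical driver (the foo_ names are illustrative; the ethtool helpers and SUPPORTED_*/ADVERTISED_* constants are the real ones used above):

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_Autoneg | SUPPORTED_100baseT_Full |
			SUPPORTED_TP;
	u32 advertising = ADVERTISED_Autoneg | ADVERTISED_100baseT_Full |
			  ADVERTISED_TP;

	cmd->base.port = PORT_TP;
	cmd->base.speed = SPEED_100;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* Convert the legacy u32 masks into link-mode bitmaps at the end */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

The set path goes the other way through ethtool_convert_link_mode_to_legacy_u32(), as niu_set_link_ksettings() and gem_set_link_ksettings() below do.
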
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 57978056b336..2dcca249eb9c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6813,7 +6813,8 @@ static void niu_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct niu *np = netdev_priv(dev);
struct niu_link_config *lp;
@@ -6821,28 +6822,30 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lp = &np->link_config;
memset(cmd, 0, sizeof(*cmd));
- cmd->phy_address = np->phy_addr;
- cmd->supported = lp->supported;
- cmd->advertising = lp->active_advertising;
- cmd->autoneg = lp->active_autoneg;
- ethtool_cmd_speed_set(cmd, lp->active_speed);
- cmd->duplex = lp->active_duplex;
- cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
- cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
+ cmd->base.phy_address = np->phy_addr;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ lp->supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ lp->active_advertising);
+ cmd->base.autoneg = lp->active_autoneg;
+ cmd->base.speed = lp->active_speed;
+ cmd->base.duplex = lp->active_duplex;
+ cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
return 0;
}
-static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct niu *np = netdev_priv(dev);
struct niu_link_config *lp = &np->link_config;
- lp->advertising = cmd->advertising;
- lp->speed = ethtool_cmd_speed(cmd);
- lp->duplex = cmd->duplex;
- lp->autoneg = cmd->autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
+ cmd->link_modes.advertising);
+ lp->speed = cmd->base.speed;
+ lp->duplex = cmd->base.duplex;
+ lp->autoneg = cmd->base.autoneg;
return niu_init_link(np);
}
@@ -7902,14 +7905,14 @@ static const struct ethtool_ops niu_ethtool_ops = {
.nway_reset = niu_nway_reset,
.get_eeprom_len = niu_get_eeprom_len,
.get_eeprom = niu_get_eeprom,
- .get_settings = niu_get_settings,
- .set_settings = niu_set_settings,
.get_strings = niu_get_strings,
.get_sset_count = niu_get_sset_count,
.get_ethtool_stats = niu_get_ethtool_stats,
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_link_ksettings = niu_get_link_ksettings,
+ .set_link_ksettings = niu_set_link_ksettings,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 5c5952e782cd..dbfca0466760 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1250,12 +1250,17 @@ static void gem_stop_dma(struct gem *gp)
// XXX dbl check what that function should do when called on PCS PHY
-static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+static void gem_begin_auto_negotiation(struct gem *gp,
+ const struct ethtool_link_ksettings *ep)
{
u32 advertise, features;
int autoneg;
int speed;
int duplex;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ ep->link_modes.advertising);
if (gp->phy_type != phy_mii_mdio0 &&
gp->phy_type != phy_mii_mdio1)
@@ -1278,13 +1283,13 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
/* Setup link parameters */
if (!ep)
goto start_aneg;
- if (ep->autoneg == AUTONEG_ENABLE) {
- advertise = ep->advertising;
+ if (ep->base.autoneg == AUTONEG_ENABLE) {
+ advertise = advertising;
autoneg = 1;
} else {
autoneg = 0;
- speed = ethtool_cmd_speed(ep);
- duplex = ep->duplex;
+ speed = ep->base.speed;
+ duplex = ep->base.duplex;
}
start_aneg:
@@ -2515,85 +2520,96 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
-static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct gem *gp = netdev_priv(dev);
+ u32 supported, advertising;
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
if (gp->phy_mii.def)
- cmd->supported = gp->phy_mii.def->features;
+ supported = gp->phy_mii.def->features;
else
- cmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full);
/* XXX hardcoded stuff for now */
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = 0; /* XXX fixed PHYAD */
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = 0; /* XXX fixed PHYAD */
/* Return current PHY settings */
- cmd->autoneg = gp->want_autoneg;
- ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
- cmd->duplex = gp->phy_mii.duplex;
- cmd->advertising = gp->phy_mii.advertising;
+ cmd->base.autoneg = gp->want_autoneg;
+ cmd->base.speed = gp->phy_mii.speed;
+ cmd->base.duplex = gp->phy_mii.duplex;
+ advertising = gp->phy_mii.advertising;
/* If we started with a forced mode, we don't have a default
* advertise set, we need to return something sensible so
* userland can re-enable autoneg properly.
*/
- if (cmd->advertising == 0)
- cmd->advertising = cmd->supported;
+ if (advertising == 0)
+ advertising = supported;
} else { // XXX PCS ?
- cmd->supported =
+ supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg);
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = cmd->port = cmd->phy_address =
- cmd->transceiver = cmd->autoneg = 0;
+ advertising = supported;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0;
+ cmd->base.port = 0;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = 0;
/* serdes means usually a Fibre connector, with most fixed */
if (gp->phy_type == phy_serdes) {
- cmd->port = PORT_FIBRE;
- cmd->supported = (SUPPORTED_1000baseT_Half |
+ cmd->base.port = PORT_FIBRE;
+ supported = (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE | SUPPORTED_Autoneg |
SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- cmd->advertising = cmd->supported;
- cmd->transceiver = XCVR_INTERNAL;
+ advertising = supported;
if (gp->lstate == link_up)
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = 1;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = 1;
}
}
- cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct gem *gp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE &&
- cmd->advertising == 0)
+ if (cmd->base.autoneg == AUTONEG_ENABLE &&
+ advertising == 0)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
@@ -2666,13 +2682,13 @@ static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops gem_ethtool_ops = {
.get_drvinfo = gem_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_settings = gem_get_settings,
- .set_settings = gem_set_settings,
.nway_reset = gem_nway_reset,
.get_msglevel = gem_get_msglevel,
.set_msglevel = gem_set_msglevel,
.get_wol = gem_get_wol,
.set_wol = gem_set_wol,
+ .get_link_ksettings = gem_get_link_ksettings,
+ .set_link_ksettings = gem_set_link_ksettings,
};
static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72ff05cd3ed8..53ff66ef53ac 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1294,9 +1294,10 @@ static void happy_meal_init_rings(struct happy_meal *hp)
}
/* hp->happy_lock must be held */
-static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
- void __iomem *tregs,
- struct ethtool_cmd *ep)
+static void
+happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+ void __iomem *tregs,
+ const struct ethtool_link_ksettings *ep)
{
int timeout;
@@ -1309,7 +1310,7 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
- if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
/* Advertise everything we can support. */
if (hp->sw_bmsr & BMSR_10HALF)
hp->sw_advertise |= (ADVERTISE_10HALF);
@@ -1384,14 +1385,14 @@ force_link:
/* Disable auto-negotiation in BMCR, enable the duplex and
* speed setting, init the timer state machine, and fire it off.
*/
- if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
hp->sw_bmcr = BMCR_SPEED100;
} else {
- if (ethtool_cmd_speed(ep) == SPEED_100)
+ if (ep->base.speed == SPEED_100)
hp->sw_bmcr = BMCR_SPEED100;
else
hp->sw_bmcr = 0;
- if (ep->duplex == DUPLEX_FULL)
+ if (ep->base.duplex == DUPLEX_FULL)
hp->sw_bmcr |= BMCR_FULLDPLX;
}
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -2434,20 +2435,21 @@ static void happy_meal_set_multicast(struct net_device *dev)
}
/* Ethtool support... */
-static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
u32 speed;
+ u32 supported;
- cmd->supported =
+ supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
/* XXX hardcoded stuff for now */
- cmd->port = PORT_TP; /* XXX no MII support */
- cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
- cmd->phy_address = 0; /* XXX fixed PHYAD */
+ cmd->base.port = PORT_TP; /* XXX no MII support */
+ cmd->base.phy_address = 0; /* XXX fixed PHYAD */
/* Record PHY settings. */
spin_lock_irq(&hp->happy_lock);
@@ -2456,41 +2458,45 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_unlock_irq(&hp->happy_lock);
if (hp->sw_bmcr & BMCR_ANENABLE) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
SPEED_100 : SPEED_10);
if (speed == SPEED_100)
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_lpa & (LPA_100FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
else
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_lpa & (LPA_10FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
- ethtool_cmd_speed_set(cmd, speed);
+ cmd->base.speed = speed;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
return 0;
}
-static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
- ((ethtool_cmd_speed(cmd) != SPEED_100 &&
- ethtool_cmd_speed(cmd) != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ ((cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Ok, do it to it. */
@@ -2537,10 +2543,10 @@ static u32 hme_get_link(struct net_device *dev)
}
static const struct ethtool_ops hme_ethtool_ops = {
- .get_settings = hme_get_settings,
- .set_settings = hme_set_settings,
.get_drvinfo = hme_get_drvinfo,
.get_link = hme_get_link,
+ .get_link_ksettings = hme_get_link_ksettings,
+ .set_link_ksettings = hme_set_link_ksettings,
};
static int hme_version_printed;
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..a9503884e1c2
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,41 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+ bool "Synopsys devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Synopsys devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config DWC_XLGMAC
+ tristate "Synopsys DWC Enterprise Ethernet (XLGMAC) driver support"
+ depends on HAS_IOMEM && HAS_DMA
+ select BITREVERSE
+ select CRC32
+ ---help---
+ This driver supports the Synopsys DesignWare Cores Enterprise
+ Ethernet (dwc-xlgmac).
+
+if DWC_XLGMAC
+
+config DWC_XLGMAC_PCI
+ tristate "XLGMAC PCI bus support"
+ depends on DWC_XLGMAC && PCI
+ ---help---
+ This selects the pci bus support for the dwc-xlgmac driver.
+ This driver was tested on the Synopsys XLGMAC IP Prototyping Kit.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
+endif # DWC_XLGMAC
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..c06e2eb3be90
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_DWC_XLGMAC) += dwc-xlgmac.o
+dwc-xlgmac-objs := dwc-xlgmac-net.o dwc-xlgmac-desc.o \
+ dwc-xlgmac-hw.o dwc-xlgmac-common.o
+
+dwc-xlgmac-$(CONFIG_DWC_XLGMAC_PCI) += dwc-xlgmac-pci.o
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
new file mode 100644
index 000000000000..726d78ac4907
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -0,0 +1,736 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
+
+static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+
+ /* Currently it uses a static MAC address for testing */
+ memcpy(pdata->mac_addr, dev_addr, netdev->addr_len);
+}
+
+static void xlgmac_default_config(struct xlgmac_pdata *pdata)
+{
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_128;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_128;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_25000;
+ pdata->sysclk_rate = XLGMAC_SYSCLOCK;
+
+ strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+ strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+}
+
+static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
+{
+ xlgmac_init_desc_ops(&pdata->desc_ops);
+ xlgmac_init_hw_ops(&pdata->hw_ops);
+}
+
+static int xlgmac_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ /* Set default configuration data */
+ xlgmac_default_config(pdata);
+
+ /* Set irq, base_addr and MAC address */
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->mac_regs;
+ xlgmac_read_mac_addr(pdata);
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Set all the function pointers */
+ xlgmac_init_all_ops(pdata);
+
+ /* Issue software reset to device */
+ hw_ops->exit(pdata);
+
+ /* Populate the hardware features */
+ xlgmac_get_all_hw_features(pdata);
+ xlgmac_print_all_hw_features(pdata);
+
+ /* TODO: Set the PHY mode to XLGMII */
+
+ /* Set the DMA mask */
+ ret = dma_set_mask_and_coherent(pdata->dev,
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n");
+ return ret;
+ }
+
+ /* Channel and ring params initialization
+ * pdata->channel_count;
+ * pdata->tx_ring_count;
+ * pdata->rx_ring_count;
+ * pdata->tx_desc_count;
+ * pdata->rx_desc_count;
+ */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT);
+ pdata->tx_desc_count = XLGMAC_TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ return ret;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT);
+ pdata->rx_desc_count = XLGMAC_RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->hw_feat.tx_q_cnt);
+ pdata->tx_q_count = pdata->tx_ring_count;
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count);
+ if (ret) {
+ dev_err(pdata->dev, "error setting real tx queue count\n");
+ return ret;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->hw_feat.rx_q_cnt);
+ pdata->rx_q_count = pdata->rx_ring_count;
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count);
+ if (ret) {
+ dev_err(pdata->dev, "error setting real rx queue count\n");
+ return ret;
+ }
+
+ pdata->channel_count =
+ max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
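+ /* The indirection table spreads hash results round-robin across the
+ * Rx rings; the option bits below enable 2-tuple (IP) and 4-tuple
+ * (TCP/UDP over IPv4) hashing.
+ */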
+ for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++)
+ pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+ pdata->rss_table[i],
+ MAC_RSSDR_DMCH_POS,
+ MAC_RSSDR_DMCH_LEN,
+ i % pdata->rx_ring_count);
+
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_IP2TE_POS,
+ MAC_RSSCR_IP2TE_LEN, 1);
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_TCP4TE_POS,
+ MAC_RSSCR_TCP4TE_LEN, 1);
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_UDP4TE_POS,
+ MAC_RSSCR_UDP4TE_LEN, 1);
+
+ /* Set device operations */
+ netdev->netdev_ops = xlgmac_get_netdev_ops();
+
+ /* Set device features */
+ if (pdata->hw_feat.tso) {
+ netdev->hw_features = NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_SG;
+ netdev->hw_features |= NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ } else if (pdata->hw_feat.tx_coe) {
+ netdev->hw_features = NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ }
+
+ if (pdata->hw_feat.rx_coe) {
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_GRO;
+ }
+
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->vlan_features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (pdata->hw_feat.sa_vlan_ins)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (pdata->hw_feat.vlhash)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+
+ /* Tx coalesce parameters initialization */
+ pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES;
+
+ /* Rx coalesce parameters initialization */
+ pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS;
+ pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES;
+
+ return 0;
+}
+
+int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res)
+{
+ struct xlgmac_pdata *pdata;
+ struct net_device *netdev;
+ int ret;
+
+ netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata),
+ XLGMAC_MAX_DMA_CHANNELS);
+
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(netdev, dev);
+ dev_set_drvdata(dev, netdev);
+ pdata = netdev_priv(netdev);
+ pdata->dev = dev;
+ pdata->netdev = netdev;
+
+ pdata->dev_irq = res->irq;
+ pdata->mac_regs = res->addr;
+
+ mutex_init(&pdata->rss_mutex);
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ ret = xlgmac_init(pdata);
+ if (ret) {
+ dev_err(dev, "xlgmac init failed\n");
+ goto err_free_netdev;
+ }
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_free_netdev;
+ }
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return ret;
+}
+
+int xlgmac_drv_remove(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+
+ return 0;
+}
+
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx,
+ unsigned int count,
+ unsigned int flag)
+{
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+
+ while (count--) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+ dma_desc = desc_data->dma_desc;
+
+ netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n",
+ desc_data->dma_desc, &desc_data->dma_desc_addr);
+ netdev_dbg(pdata->netdev,
+ "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(dma_desc->desc0),
+ le32_to_cpu(dma_desc->desc1),
+ le32_to_cpu(dma_desc->desc2),
+ le32_to_cpu(dma_desc->desc3));
+
+ idx++;
+ }
+}
+
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx)
+{
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+ dma_desc = desc_data->dma_desc;
+
+ netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n",
+ desc_data->dma_desc, &desc_data->dma_desc_addr);
+ netdev_dbg(pdata->netdev,
+ "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx,
+ le32_to_cpu(dma_desc->desc0),
+ le32_to_cpu(dma_desc->desc1),
+ le32_to_cpu(dma_desc->desc2),
+ le32_to_cpu(dma_desc->desc3));
+}
+
+void xlgmac_print_pkt(struct net_device *netdev,
+ struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+
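+ /* Hex dump of the payload: bytes are grouped in fours, with a wider
+ * gap every 16 bytes and one debug line flushed every 32 bytes.
+ */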
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+}
+
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_features *hw_feat = &pdata->hw_feat;
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+
+ mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R);
+ mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R);
+ mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = readl(pdata->mac_regs + MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->phyifsel = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_PHYIFSEL_POS,
+ MAC_HWF0R_PHYIFSEL_LEN);
+ hw_feat->vlhash = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_VLHASH_POS,
+ MAC_HWF0R_VLHASH_LEN);
+ hw_feat->sma = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_SMASEL_POS,
+ MAC_HWF0R_SMASEL_LEN);
+ hw_feat->rwk = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_RWKSEL_POS,
+ MAC_HWF0R_RWKSEL_LEN);
+ hw_feat->mgk = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_MGKSEL_POS,
+ MAC_HWF0R_MGKSEL_LEN);
+ hw_feat->mmc = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_MMCSEL_POS,
+ MAC_HWF0R_MMCSEL_LEN);
+ hw_feat->aoe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_ARPOFFSEL_POS,
+ MAC_HWF0R_ARPOFFSEL_LEN);
+ hw_feat->ts = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TSSEL_POS,
+ MAC_HWF0R_TSSEL_LEN);
+ hw_feat->eee = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_EEESEL_POS,
+ MAC_HWF0R_EEESEL_LEN);
+ hw_feat->tx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TXCOESEL_POS,
+ MAC_HWF0R_TXCOESEL_LEN);
+ hw_feat->rx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_RXCOESEL_POS,
+ MAC_HWF0R_RXCOESEL_LEN);
+ hw_feat->addn_mac = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_ADDMACADRSEL_POS,
+ MAC_HWF0R_ADDMACADRSEL_LEN);
+ hw_feat->ts_src = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TSSTSSEL_POS,
+ MAC_HWF0R_TSSTSSEL_LEN);
+ hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_SAVLANINS_POS,
+ MAC_HWF0R_SAVLANINS_LEN);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_RXFIFOSIZE_POS,
+ MAC_HWF1R_RXFIFOSIZE_LEN);
+ hw_feat->tx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_TXFIFOSIZE_POS,
+ MAC_HWF1R_TXFIFOSIZE_LEN);
+ hw_feat->adv_ts_hi = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_ADVTHWORD_POS,
+ MAC_HWF1R_ADVTHWORD_LEN);
+ hw_feat->dma_width = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_ADDR64_POS,
+ MAC_HWF1R_ADDR64_LEN);
+ hw_feat->dcb = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_DCBEN_POS,
+ MAC_HWF1R_DCBEN_LEN);
+ hw_feat->sph = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_SPHEN_POS,
+ MAC_HWF1R_SPHEN_LEN);
+ hw_feat->tso = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_TSOEN_POS,
+ MAC_HWF1R_TSOEN_LEN);
+ hw_feat->dma_debug = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_DBGMEMA_POS,
+ MAC_HWF1R_DBGMEMA_LEN);
+ hw_feat->rss = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_RSSEN_POS,
+ MAC_HWF1R_RSSEN_LEN);
+ hw_feat->tc_cnt = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_NUMTC_POS,
+ MAC_HWF1R_NUMTC_LEN);
+ hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_HASHTBLSZ_POS,
+ MAC_HWF1R_HASHTBLSZ_LEN);
+ hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_L3L4FNUM_POS,
+ MAC_HWF1R_L3L4FNUM_LEN);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_RXQCNT_POS,
+ MAC_HWF2R_RXQCNT_LEN);
+ hw_feat->tx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_TXQCNT_POS,
+ MAC_HWF2R_TXQCNT_LEN);
+ hw_feat->rx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_RXCHCNT_POS,
+ MAC_HWF2R_RXCHCNT_LEN);
+ hw_feat->tx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_TXCHCNT_POS,
+ MAC_HWF2R_TXCHCNT_LEN);
+ hw_feat->pps_out_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_PPSOUTNUM_POS,
+ MAC_HWF2R_PPSOUTNUM_LEN);
+ hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_AUXSNAPNUM_POS,
+ MAC_HWF2R_AUXSNAPNUM_LEN);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+}
+
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
+{
+ char *str = NULL;
+
+ XLGMAC_PR("\n");
+ XLGMAC_PR("=====================================================\n");
+ XLGMAC_PR("\n");
+ XLGMAC_PR("HW support following features\n");
+ XLGMAC_PR("\n");
+ /* HW Feature Register0 */
+ XLGMAC_PR("VLAN Hash Filter Selected : %s\n",
+ pdata->hw_feat.vlhash ? "YES" : "NO");
+ XLGMAC_PR("SMA (MDIO) Interface : %s\n",
+ pdata->hw_feat.sma ? "YES" : "NO");
+ XLGMAC_PR("PMT Remote Wake-up Packet Enable : %s\n",
+ pdata->hw_feat.rwk ? "YES" : "NO");
+ XLGMAC_PR("PMT Magic Packet Enable : %s\n",
+ pdata->hw_feat.mgk ? "YES" : "NO");
+ XLGMAC_PR("RMON/MMC Module Enable : %s\n",
+ pdata->hw_feat.mmc ? "YES" : "NO");
+ XLGMAC_PR("ARP Offload Enabled : %s\n",
+ pdata->hw_feat.aoe ? "YES" : "NO");
+ XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled : %s\n",
+ pdata->hw_feat.ts ? "YES" : "NO");
+ XLGMAC_PR("Energy Efficient Ethernet Enabled : %s\n",
+ pdata->hw_feat.eee ? "YES" : "NO");
+ XLGMAC_PR("Transmit Checksum Offload Enabled : %s\n",
+ pdata->hw_feat.tx_coe ? "YES" : "NO");
+ XLGMAC_PR("Receive Checksum Offload Enabled : %s\n",
+ pdata->hw_feat.rx_coe ? "YES" : "NO");
+ XLGMAC_PR("Additional MAC Addresses 1-31 Selected : %s\n",
+ pdata->hw_feat.addn_mac ? "YES" : "NO");
+
+ switch (pdata->hw_feat.ts_src) {
+ case 0:
+ str = "RESERVED";
+ break;
+ case 1:
+ str = "INTERNAL";
+ break;
+ case 2:
+ str = "EXTERNAL";
+ break;
+ case 3:
+ str = "BOTH";
+ break;
+ }
+ XLGMAC_PR("Timestamp System Time Source : %s\n", str);
+
+ XLGMAC_PR("Source Address or VLAN Insertion Enable : %s\n",
+ pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
+
+ /* HW Feature Register1 */
+ switch (pdata->hw_feat.rx_fifo_size) {
+ case 0:
+ str = "128 bytes";
+ break;
+ case 1:
+ str = "256 bytes";
+ break;
+ case 2:
+ str = "512 bytes";
+ break;
+ case 3:
+ str = "1 KBytes";
+ break;
+ case 4:
+ str = "2 KBytes";
+ break;
+ case 5:
+ str = "4 KBytes";
+ break;
+ case 6:
+ str = "8 KBytes";
+ break;
+ case 7:
+ str = "16 KBytes";
+ break;
+ case 8:
+ str = "32 kBytes";
+ break;
+ case 9:
+ str = "64 KBytes";
+ break;
+ case 10:
+ str = "128 KBytes";
+ break;
+ case 11:
+ str = "256 KBytes";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("MTL Receive FIFO Size : %s\n", str);
+
+ switch (pdata->hw_feat.tx_fifo_size) {
+ case 0:
+ str = "128 bytes";
+ break;
+ case 1:
+ str = "256 bytes";
+ break;
+ case 2:
+ str = "512 bytes";
+ break;
+ case 3:
+ str = "1 KBytes";
+ break;
+ case 4:
+ str = "2 KBytes";
+ break;
+ case 5:
+ str = "4 KBytes";
+ break;
+ case 6:
+ str = "8 KBytes";
+ break;
+ case 7:
+ str = "16 KBytes";
+ break;
+ case 8:
+ str = "32 kBytes";
+ break;
+ case 9:
+ str = "64 KBytes";
+ break;
+ case 10:
+ str = "128 KBytes";
+ break;
+ case 11:
+ str = "256 KBytes";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("MTL Transmit FIFO Size : %s\n", str);
+
+ XLGMAC_PR("IEEE 1588 High Word Register Enable : %s\n",
+ pdata->hw_feat.adv_ts_hi ? "YES" : "NO");
+ XLGMAC_PR("Address width : %u\n",
+ pdata->hw_feat.dma_width);
+ XLGMAC_PR("DCB Feature Enable : %s\n",
+ pdata->hw_feat.dcb ? "YES" : "NO");
+ XLGMAC_PR("Split Header Feature Enable : %s\n",
+ pdata->hw_feat.sph ? "YES" : "NO");
+ XLGMAC_PR("TCP Segmentation Offload Enable : %s\n",
+ pdata->hw_feat.tso ? "YES" : "NO");
+ XLGMAC_PR("DMA Debug Registers Enabled : %s\n",
+ pdata->hw_feat.dma_debug ? "YES" : "NO");
+ XLGMAC_PR("RSS Feature Enabled : %s\n",
+ pdata->hw_feat.rss ? "YES" : "NO");
+ XLGMAC_PR("Number of Traffic classes : %u\n",
+ (pdata->hw_feat.tc_cnt));
+ XLGMAC_PR("Hash Table Size : %u\n",
+ pdata->hw_feat.hash_table_size);
+ XLGMAC_PR("Total number of L3 or L4 Filters : %u\n",
+ pdata->hw_feat.l3l4_filter_num);
+
+ /* HW Feature Register2 */
+ XLGMAC_PR("Number of MTL Receive Queues : %u\n",
+ pdata->hw_feat.rx_q_cnt);
+ XLGMAC_PR("Number of MTL Transmit Queues : %u\n",
+ pdata->hw_feat.tx_q_cnt);
+ XLGMAC_PR("Number of DMA Receive Channels : %u\n",
+ pdata->hw_feat.rx_ch_cnt);
+ XLGMAC_PR("Number of DMA Transmit Channels : %u\n",
+ pdata->hw_feat.tx_ch_cnt);
+
+ switch (pdata->hw_feat.pps_out_num) {
+ case 0:
+ str = "No PPS output";
+ break;
+ case 1:
+ str = "1 PPS output";
+ break;
+ case 2:
+ str = "2 PPS output";
+ break;
+ case 3:
+ str = "3 PPS output";
+ break;
+ case 4:
+ str = "4 PPS output";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("Number of PPS Outputs : %s\n", str);
+
+ switch (pdata->hw_feat.aux_snap_num) {
+ case 0:
+ str = "No auxiliary input";
+ break;
+ case 1:
+ str = "1 auxiliary input";
+ break;
+ case 2:
+ str = "2 auxiliary input";
+ break;
+ case 3:
+ str = "3 auxiliary input";
+ break;
+ case 4:
+ str = "4 auxiliary input";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("Number of Auxiliary Snapshot Inputs : %s", str);
+
+ XLGMAC_PR("\n");
+ XLGMAC_PR("=====================================================\n");
+ XLGMAC_PR("\n");
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
new file mode 100644
index 000000000000..55c796ed7d26
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -0,0 +1,648 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data)
+{
+ if (desc_data->skb_dma) {
+ if (desc_data->mapped_as_page) {
+ dma_unmap_page(pdata->dev, desc_data->skb_dma,
+ desc_data->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, desc_data->skb_dma,
+ desc_data->skb_dma_len, DMA_TO_DEVICE);
+ }
+ desc_data->skb_dma = 0;
+ desc_data->skb_dma_len = 0;
+ }
+
+ if (desc_data->skb) {
+ dev_kfree_skb_any(desc_data->skb);
+ desc_data->skb = NULL;
+ }
+
+ if (desc_data->rx.hdr.pa.pages)
+ put_page(desc_data->rx.hdr.pa.pages);
+
+ if (desc_data->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
+ desc_data->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(desc_data->rx.hdr.pa_unmap.pages);
+ }
+
+ if (desc_data->rx.buf.pa.pages)
+ put_page(desc_data->rx.buf.pa.pages);
+
+ if (desc_data->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
+ desc_data->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(desc_data->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&desc_data->tx, 0, sizeof(desc_data->tx));
+ memset(&desc_data->rx, 0, sizeof(desc_data->rx));
+
+ desc_data->mapped_as_page = 0;
+
+ if (desc_data->state_saved) {
+ desc_data->state_saved = 0;
+ desc_data->state.skb = NULL;
+ desc_data->state.len = 0;
+ desc_data->state.error = 0;
+ }
+}
+
+static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring)
+{
+ struct xlgmac_desc_data *desc_data;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->desc_data_head) {
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+ xlgmac_unmap_desc_data(pdata, desc_data);
+ }
+
+ kfree(ring->desc_data_head);
+ ring->desc_data_head = NULL;
+ }
+
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
+ if (ring->dma_desc_head) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xlgmac_dma_desc) *
+ ring->dma_desc_count),
+ ring->dma_desc_head,
+ ring->dma_desc_head_addr);
+ ring->dma_desc_head = NULL;
+ }
+}
+
+static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int dma_desc_count)
+{
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->dma_desc_count = dma_desc_count;
+ ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xlgmac_dma_desc) *
+ dma_desc_count),
+ &ring->dma_desc_head_addr,
+ GFP_KERNEL);
+ if (!ring->dma_desc_head)
+ return -ENOMEM;
+
+ /* Array of descriptor data */
+ ring->desc_data_head = kcalloc(dma_desc_count,
+ sizeof(struct xlgmac_desc_data),
+ GFP_KERNEL);
+ if (!ring->desc_data_head)
+ return -ENOMEM;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
+ ring->dma_desc_head,
+ &ring->dma_desc_head_addr,
+ ring->desc_data_head);
+
+ return 0;
+}
+
+static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (!pdata->channel_head)
+ return;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xlgmac_free_ring(pdata, channel->tx_ring);
+ xlgmac_free_ring(pdata, channel->rx_ring);
+ }
+}
+
+static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ int ret;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
+ ret = xlgmac_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring");
+ goto err_init_ring;
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
+ ret = xlgmac_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Rx ring\n");
+ goto err_init_ring;
+ }
+ }
+
+ return 0;
+
+err_init_ring:
+ xlgmac_free_rings(pdata);
+
+ return ret;
+}
+
+static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
+{
+ if (!pdata->channel_head)
+ return;
+
+ kfree(pdata->channel_head->tx_ring);
+ pdata->channel_head->tx_ring = NULL;
+
+ kfree(pdata->channel_head->rx_ring);
+ pdata->channel_head->rx_ring = NULL;
+
+ kfree(pdata->channel_head);
+
+ pdata->channel_head = NULL;
+ pdata->channel_count = 0;
+}
+
+static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel_head, *channel;
+ struct xlgmac_ring *tx_ring, *rx_ring;
+ int ret = -ENOMEM;
+ unsigned int i;
+
+ channel_head = kcalloc(pdata->channel_count,
+ sizeof(struct xlgmac_channel), GFP_KERNEL);
+ if (!channel_head)
+ return ret;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "channel_head=%p\n", channel_head);
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_head; i < pdata->channel_count;
+ i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the per DMA interrupt */
+ ret = pdata->channel_irq[i];
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count)
+ channel->tx_ring = tx_ring++;
+
+ if (i < pdata->rx_ring_count)
+ channel->rx_ring = rx_ring++;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
+ channel->name, channel->dma_regs,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_head = channel_head;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_head);
+
+ return ret;
+}
+
+static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+ xlgmac_free_rings(pdata);
+
+ xlgmac_free_channels(pdata);
+}
+
+static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+ int ret;
+
+ ret = xlgmac_alloc_channels(pdata);
+ if (ret)
+ goto err_alloc;
+
+ ret = xlgmac_alloc_rings(pdata);
+ if (ret)
+ goto err_alloc;
+
+ return 0;
+
+err_alloc:
+ xlgmac_free_channels_and_rings(pdata);
+
+ return ret;
+}
+
+static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
+ struct xlgmac_page_alloc *pa,
+ gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
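+ /* Hand out a 'len' sized slice of the current page allocation to one
+ * descriptor. The descriptor that takes the last slice that fits also
+ * inherits the pa_unmap copy and unmaps the whole allocation later.
+ */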
+static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
+ struct xlgmac_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
+static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct xlgmac_desc_data *desc_data)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
+ GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
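+ /* Rx data buffers are carved out of a larger compound page; start at
+ * the highest order the page allocator still considers cheap and let
+ * xlgmac_alloc_pages() fall back to smaller orders on failure.
+ */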
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
+ GFP_ATOMIC, order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
+ XLGMAC_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ dma_addr_t dma_desc_addr;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ dma_desc = ring->dma_desc_head;
+ dma_desc_addr = ring->dma_desc_head_addr;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+ desc_data->dma_desc = dma_desc;
+ desc_data->dma_desc_addr = dma_desc_addr;
+
+ dma_desc++;
+ dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
+
+ hw_ops->tx_desc_init(channel);
+ }
+}
+
+static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ dma_addr_t dma_desc_addr;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ dma_desc = ring->dma_desc_head;
+ dma_desc_addr = ring->dma_desc_head_addr;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+ desc_data->dma_desc = dma_desc;
+ desc_data->dma_desc_addr = dma_desc_addr;
+
+ if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
+ break;
+
+ dma_desc++;
+ dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+
+ hw_ops->rx_desc_init(channel);
+ }
+}
+
+static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
+ struct sk_buff *skb)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ unsigned int start_index, cur_index;
+ struct xlgmac_desc_data *desc_data;
+ unsigned int offset, datalen, len;
+ struct xlgmac_pkt_info *pkt_info;
+ struct skb_frag_struct *frag;
+ unsigned int tso, vlan;
+ dma_addr_t skb_dma;
+ unsigned int i;
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ pkt_info = &ring->pkt_info;
+ pkt_info->desc_count = 0;
+ pkt_info->length = 0;
+
+ tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ /* Save space for a context descriptor if needed */
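+ /* A context descriptor carries new MSS or VLAN tag settings and takes
+ * up a ring slot of its own, so skip one entry before mapping data.
+ */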
+ if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
+ (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pkt_info->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = pkt_info->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, pkt_info->header_len);
+
+ offset = pkt_info->header_len;
+
+ pkt_info->length += pkt_info->header_len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ pkt_info->length += len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen,
+ XLGMAC_TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = len;
+ desc_data->mapped_as_page = 1;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ pkt_info->length += len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so desc_data is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
+ desc_data->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ pkt_info->desc_count = cur_index - start_index;
+
+ return pkt_info->desc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
+ xlgmac_unmap_desc_data(pdata, desc_data);
+ }
+
+ return 0;
+}
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
+{
+ desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
+ desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
+ desc_ops->map_tx_skb = xlgmac_map_tx_skb;
+ desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
+ desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
+ desc_ops->tx_desc_init = xlgmac_tx_desc_init;
+ desc_ops->rx_desc_init = xlgmac_rx_desc_init;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
new file mode 100644
index 000000000000..5cf3e90d4834
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -0,0 +1,3146 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
+{
+ return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN);
+}
+
+static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+ MAC_RCR_IPC_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+ MAC_RCR_IPC_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ return 0;
+}
+
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
+ writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
+
+ return 0;
+}
+
+static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
+ struct netdev_hw_addr *ha,
+ unsigned int *mac_reg)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+
+ if (ha) {
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
+
+ mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
+ MAC_MACA1HR_AE_POS,
+ MAC_MACA1HR_AE_LEN,
+ 1);
+ }
+
+ writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
+ *mac_reg += MAC_MACA_INC;
+ writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
+ *mac_reg += MAC_MACA_INC;
+}
+
+static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ /* Put the VLAN tag in the Rx descriptor */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
+ MAC_VLANTR_EVLRXS_LEN, 1);
+ /* Don't check the VLAN type */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
+ MAC_VLANTR_DOVLTC_LEN, 1);
+ /* Check only C-TAG (0x8100) packets */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
+ MAC_VLANTR_ERSVLM_LEN, 0);
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
+ MAC_VLANTR_ESVL_LEN, 0);
+ /* Enable VLAN tag stripping */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0x3);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ /* Enable VLAN filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+ MAC_PFR_VTFE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ /* Enable VLAN Hash Table filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
+ MAC_VLANTR_VTHM_LEN, 1);
+ /* Disable VLAN tag inverse matching */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
+ MAC_VLANTR_VTIM_LEN, 0);
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
+ MAC_VLANTR_ETV_LEN, 1);
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
+ MAC_VLANTR_VL_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ /* Disable VLAN filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+ MAC_PFR_VTFE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ return 0;
+}
+
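+ /* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over just the
+ * 12 valid VLAN ID bits, intended to match the hash the MAC computes
+ * when filtering tagged frames.
+ */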
+static u32 xlgmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 poly = 0xedb88320;
+ u32 crc = ~0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
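+ /* Rebuild the 16-bit VLAN hash filter: every active VID is hashed with
+ * CRC-32, bit-reversed, and the top four bits select which of the 16
+ * table bits to set.
+ */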
+static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
+{
+ u16 vlan_hash_table = 0;
+ __le16 vid_le;
+ u32 regval;
+ u32 crc;
+ u16 vid;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
+
+ regval = readl(pdata->mac_regs + MAC_VLANHTR);
+ /* Set the VLAN Hash Table filtering register */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
+ MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
+ writel(regval, pdata->mac_regs + MAC_VLANHTR);
+
+ return 0;
+}
+
+static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+ MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
+ if (regval == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
+ MAC_PFR_PR_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ /* Hardware will still perform VLAN filtering in promiscuous mode */
+ if (enable) {
+ xlgmac_disable_rx_vlan_filtering(pdata);
+ } else {
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xlgmac_enable_rx_vlan_filtering(pdata);
+ }
+
+ return 0;
+}
+
+static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+ MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
+ if (regval == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
+ MAC_PFR_PM_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ return 0;
+}
+
+static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int addn_macs;
+ unsigned int mac_reg;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xlgmac_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xlgmac_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
+{
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int i;
+ u32 crc;
+
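+ /* A 64/128/256-entry hash table is indexed by the top 6/7/8 bits of
+ * the bit-reversed CRC-32 of the address; the shift below discards the
+ * unused low-order bits of the 32-bit CRC.
+ */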
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ writel(hash_table[i], pdata->mac_regs + hash_reg);
+ hash_reg += MAC_HTR_INC;
+ }
+}
+
+static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xlgmac_set_mac_hash_table(pdata);
+ else
+ xlgmac_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
+ MAC_PFR_HPF_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
+ MAC_PFR_HUC_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
+ MAC_PFR_HMC_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+ }
+}
+
+static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int val;
+ u32 regval;
+
+ val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
+ MAC_RCR_JE_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xlgmac_enable_rx_csum(pdata);
+ else
+ xlgmac_disable_rx_csum(pdata);
+}
+
+static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANIR);
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
+ MAC_VLANIR_CSVL_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
+ MAC_VLANIR_VLTI_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_VLANIR);
+
+ /* Set the current VLAN Hash Table register value */
+ xlgmac_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xlgmac_enable_rx_vlan_filtering(pdata);
+ else
+ xlgmac_disable_rx_vlan_filtering(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xlgmac_enable_rx_vlan_stripping(pdata);
+ else
+ xlgmac_disable_rx_vlan_stripping(pdata);
+}
+
+static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xlgmac_set_promiscuous_mode(pdata, pr_mode);
+ xlgmac_set_all_multicast_mode(pdata, am_mode);
+
+ xlgmac_add_mac_addresses(pdata);
+
+ return 0;
+}
+
+static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
+ struct xlgmac_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned long tx_timeout;
+ unsigned int tx_status;
+
+ /* Calculate the status register to read and the position within */
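+ /* The first DMA_DSRX_FIRST_QUEUE channels report their state in
+ * DMA_DSR0; later channels are packed DMA_DSRX_QPR per register
+ * starting at DMA_DSR1.
+ */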
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = readl(pdata->mac_regs + tx_dsr);
+ tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
+ DMA_DSR_TPS_LEN);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
+static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN,
+ MTL_Q_ENABLED);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Enable MAC Tx */
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+ MAC_TCR_TE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+}
+
+static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xlgmac_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+ MAC_TCR_TE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN, 0);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 0);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+}
+
+static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status, prxq, rxqsts;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, rx_timeout)) {
+ rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
+ prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
+ MTL_Q_RQDR_PRXQ_LEN);
+ rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
+ MTL_Q_RQDR_RXQSTS_LEN);
+ if ((prxq == 0) && (rxqsts == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, rx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+ DMA_CH_RCR_SR_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+
+ /* Enable each Rx queue */
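+ /* MAC_RQC0R holds a 2-bit enable field per queue; 0x2 is assumed here
+ * to mean "enabled for generic/DCB traffic" on this MAC.
+ */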
+ regval = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ regval |= (0x02 << (i << 1));
+ writel(regval, pdata->mac_regs + MAC_RQC0R);
+
+ /* Enable MAC Rx */
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+ MAC_RCR_DCRCC_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+ MAC_RCR_CST_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+ MAC_RCR_ACS_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+ MAC_RCR_RE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Disable MAC Rx */
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+ MAC_RCR_DCRCC_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+ MAC_RCR_CST_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+ MAC_RCR_ACS_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+ MAC_RCR_RE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ xlgmac_prepare_rx_stop(pdata, i);
+
+ /* Disable each Rx queue */
+ writel(0, pdata->mac_regs + MAC_RQC0R);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+ DMA_CH_RCR_SR_LEN, 0);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+}
+
+static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_desc_data *desc_data;
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
+
+ /* Start the Tx timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ unsigned int tso_context, vlan_context;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int csum, tso, vlan;
+ int start_index = ring->cur;
+ int cur_index = ring->cur;
+ unsigned int tx_set_ic;
+ int i;
+
+ pkt_info = &ring->pkt_info;
+ csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
+ tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ if (tso && (pkt_info->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += pkt_info->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (pkt_info->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ pkt_info->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+ /* Create a context descriptor if this is a TSO pkt_info */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ pkt_info->mss);
+
+ /* Set the MSS size */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_CONTEXT_DESC2_MSS_POS,
+ TX_CONTEXT_DESC2_MSS_LEN,
+ pkt_info->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN,
+ 1);
+
+ /* Indicate this descriptor contains the MSS */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_TCMSSV_POS,
+ TX_CONTEXT_DESC3_TCMSSV_LEN,
+ 1);
+
+ ring->tx.cur_mss = pkt_info->mss;
+ }
+
+ if (vlan_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ pkt_info->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN,
+ 1);
+
+ /* Set the VLAN tag */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_VT_POS,
+ TX_CONTEXT_DESC3_VT_LEN,
+ pkt_info->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_VLTV_POS,
+ TX_CONTEXT_DESC3_VLTV_LEN,
+ 1);
+
+ ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
+ }
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_VTIR_POS,
+ TX_NORMAL_DESC2_VTIR_LEN,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Timestamp enablement check */
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_PTP_POS,
+ TX_PACKET_ATTRIBUTES_PTP_LEN))
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_TTSE_POS,
+ TX_NORMAL_DESC2_TTSE_LEN,
+ 1);
+
+ /* Mark it as First Descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_FD_POS,
+ TX_NORMAL_DESC3_FD_LEN,
+ 1);
+
+ /* Mark it as a NORMAL descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN,
+ 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (cur_index != start_index)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN,
+ 1);
+
+ if (tso) {
+ /* Enable TSO */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TSE_POS,
+ TX_NORMAL_DESC3_TSE_LEN, 1);
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TCPPL_POS,
+ TX_NORMAL_DESC3_TCPPL_LEN,
+ pkt_info->tcp_payload_len);
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TCPHDRLEN_POS,
+ TX_NORMAL_DESC3_TCPHDRLEN_LEN,
+ pkt_info->tcp_header_len / 4);
+
+ pdata->stats.tx_tso_packets++;
+ } else {
+ /* Enable CRC and Pad Insertion */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CPC_POS,
+ TX_NORMAL_DESC3_CPC_LEN, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CIC_POS,
+ TX_NORMAL_DESC3_CIC_LEN,
+ 0x3);
+
+ /* Set the total length to be transmitted */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_FL_POS,
+ TX_NORMAL_DESC3_FL_LEN,
+ pkt_info->length);
+ }
+
+ for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+ /* Update buffer address */
+ dma_desc->desc0 =
+ cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 =
+ cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* Set OWN bit */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CIC_POS,
+ TX_NORMAL_DESC3_CIC_LEN,
+ 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_LD_POS,
+ TX_NORMAL_DESC3_LD_LEN, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_IC_POS,
+ TX_NORMAL_DESC2_IC_LEN, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ desc_data->tx.packets = pkt_info->tx_packets;
+ desc_data->tx.bytes = pkt_info->tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ dma_wmb();
+
+ /* Set OWN bit for the first descriptor */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ dma_desc = desc_data->dma_desc;
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN, 1);
+
+ if (netif_msg_tx_queued(pdata))
+ xlgmac_dump_tx_desc(pdata, ring, start_index,
+ pkt_info->desc_count, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ smp_wmb();
+
+ ring->cur = cur_index + 1;
+ if (!pkt_info->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xlgmac_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
+
+ XLGMAC_PR("%s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->dma_desc_count - 1),
+ (ring->cur - 1) & (ring->dma_desc_count - 1));
+}
+
+static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
+ struct xlgmac_dma_desc *dma_desc)
+{
+ u32 tsa, tsd;
+ u64 nsec;
+
+ tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_CONTEXT_DESC3_TSA_POS,
+ RX_CONTEXT_DESC3_TSA_LEN);
+ tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_CONTEXT_DESC3_TSD_POS,
+ RX_CONTEXT_DESC3_TSD_LEN);
+ if (tsa && !tsd) {
+ nsec = le32_to_cpu(dma_desc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(dma_desc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ pkt_info->rx_tstamp = nsec;
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
+ RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
+ 1);
+ }
+ }
+}
+
+static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
+{
+ struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset the desc2 control bits (IC, TTSE, B2L & B1L)
+ * Reset the desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ dma_desc->desc0 = 0;
+ dma_desc->desc1 = 0;
+ dma_desc->desc2 = 0;
+ dma_desc->desc3 = 0;
+
+ /* Make sure the descriptor reset (OWN cleared) is visible before reuse */
+ dma_wmb();
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
+{
+ struct xlgmac_ring *ring = channel->tx_ring;
+ struct xlgmac_desc_data *desc_data;
+ int start_index = ring->cur;
+ int i;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+ /* Initialize Tx descriptor */
+ xlgmac_tx_desc_reset(desc_data);
+ }
+
+ /* Update the total number of Tx descriptors (register holds count - 1) */
+ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
+
+ /* Update the starting address of descriptor ring */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ writel(upper_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
+}
+
+static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int index)
+{
+ struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ dma_addr_t hdr_dma, buf_dma;
+ unsigned int inte;
+
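+ /* For example (illustrative setting): with "ethtool -C ethX rx-frames 16"
+ * the INTE bit is set only on descriptors 15, 31, 47, ...; with both
+ * rx-frames and rx-usecs at 0, every descriptor requests an interrupt.
+ */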
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
+ */
+ hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
+ buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ RX_NORMAL_DESC3_INTE_POS,
+ RX_NORMAL_DESC3_INTE_LEN,
+ inte);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ dma_wmb();
+
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ RX_NORMAL_DESC3_OWN_POS,
+ RX_NORMAL_DESC3_OWN_LEN,
+ 1);
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
+
+static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ unsigned int start_index = ring->cur;
+ struct xlgmac_desc_data *desc_data;
+ unsigned int i;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+ /* Initialize Rx descriptor */
+ xlgmac_rx_desc_reset(pdata, desc_data, i);
+ }
+
+ /* Update the total number of Rx descriptors (register holds count - 1) */
+ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
+
+ /* Update the starting address of descriptor ring */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ writel(upper_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
+ ring->dma_desc_count - 1);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN);
+}
+
+static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_LD_POS,
+ TX_NORMAL_DESC3_LD_LEN);
+}
+
+static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, regval;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+ MTL_Q_RQOMR_EHFC_LEN, 0);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ /* Clear MAC flow control */
+ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ regval = readl(pdata->mac_regs + reg);
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MAC_Q0TFCR_TFE_POS,
+ MAC_Q0TFCR_TFE_LEN,
+ 0);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, regval;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+ MTL_Q_RQOMR_EHFC_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ /* Set MAC flow control */
+ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ regval = readl(pdata->mac_regs + reg);
+
+ /* Enable transmit flow control */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
+ MAC_Q0TFCR_TFE_LEN, 1);
+ /* Set pause time */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
+ MAC_Q0TFCR_PT_LEN, 0xffff);
+
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RFCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+ MAC_RFCR_RFE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RFCR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RFCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+ MAC_RFCR_RFE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RFCR);
+
+ return 0;
+}
+
+static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ if (pdata->tx_pause)
+ xlgmac_enable_tx_flow_control(pdata);
+ else
+ xlgmac_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ if (pdata->rx_pause)
+ xlgmac_enable_rx_flow_control(pdata);
+ else
+ xlgmac_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
+ DMA_CH_RIWT_RWT_LEN,
+ pdata->rx_riwt);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
+{
+ xlgmac_config_tx_flow_control(pdata);
+ xlgmac_config_rx_flow_control(pdata);
+}
+
+static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
+ MTL_Q_RQOMR_FEP_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+}
+
+static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
+ MTL_Q_RQOMR_FUP_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+}
+
+static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
+{
+ return 0;
+}
+
+static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
+ DMA_CH_RCR_RBSZ_LEN,
+ pdata->rx_buf_size);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+}
+
+static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ if (pdata->hw_feat.tso) {
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
+ DMA_CH_TCR_TSE_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+ }
+}
+
+static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+ DMA_CH_CR_SPH_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ }
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
+ MAC_RCR_HDSMS_LEN,
+ XLGMAC_SPH_HDSMS_SIZE);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ rate = pdata->sysclk_rate;
+
+ /* Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ * ( usec * ( system_clock_mhz / 10^6 ) ) / 256
+ */
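+ /* Example (purely illustrative, assuming a 125 MHz system clock, which
+ * is not a value taken from this driver): one watchdog unit is
+ * 256 / 125 MHz ~= 2.05 us, so a 50 us request maps to
+ * (50 * 125) / 256 = 24 units, and xlgmac_riwt_to_usec(24) returns ~49 us.
+ */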
+ ret = (usec * (rate / 1000000)) / 256;
+
+ return ret;
+}
+
+static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ rate = pdata->sysclk_rate;
+
+ /* Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ * ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ return ret;
+}
+
+static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
+ MTL_Q_RQOMR_RTC_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ regval = readl(pdata->mac_regs + MTL_OMR);
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
+ MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
+ writel(regval, pdata->mac_regs + MTL_OMR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
+ MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
+ MTL_TC_QWR_QW_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+ }
+
+ /* Set Rx to strict priority algorithm */
+ regval = readl(pdata->mac_regs + MTL_OMR);
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
+ MTL_OMR_RAA_LEN, MTL_RAA_SP);
+ writel(regval, pdata->mac_regs + MTL_OMR);
+}
+
+static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
+{
+ unsigned int ppq, ppq_extra, prio, prio_queues;
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int reg, regval;
+ unsigned int mask;
+ unsigned int i, j;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
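+ /* Illustrative example: with tx_q_count = 10 and tc_cnt = 4, qptc = 2
+ * and qptc_extra = 2, so TC0 and TC1 each get three Tx queues while
+ * TC2 and TC3 get two.
+ */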
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ regval = readl(XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MTL_Q_TQOMR_Q2TCMAP_POS,
+ MTL_Q_TQOMR_Q2TCMAP_LEN,
+ i);
+ writel(regval, XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ queue++;
+ }
+
+ if (i < qptc_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ regval = readl(XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MTL_Q_TQOMR_Q2TCMAP_POS,
+ MTL_Q_TQOMR_Q2TCMAP_LEN,
+ i);
+ writel(regval, XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ queue++;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+ pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
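+ /* Illustrative example: with rx_q_count = 3, ppq = 2 and ppq_extra = 2,
+ * so RXq0 serves PRIO0-2, RXq1 serves PRIO3-5 and RXq2 serves PRIO6-7.
+ */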
+ reg = MAC_RQC2R;
+ regval = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ prio++;
+ }
+
+ if (i < ppq_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ prio++;
+ }
+
+ regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ writel(regval, pdata->mac_regs + reg);
+ reg += MAC_RQC2_INC;
+ regval = 0;
+ }
+
+ /* Configure a one-to-one MTL Rx queue to DMA Rx channel mapping,
+ * i.e. Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
+ */
+ reg = MTL_RQDCM0R;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
+ MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MTL_RQDCM_INC;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
+ MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MTL_RQDCM_INC;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
+ MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+}
+
+static unsigned int xlgmac_calculate_per_queue_fifo(
+ unsigned int fifo_size,
+ unsigned int queue_count)
+{
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
+
+ /* Calculate the configured fifo size */
+ q_fifo_size = 1 << (fifo_size + 7);
+
+ /* The configured value may not be the actual amount of fifo RAM */
+ q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
+
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Each increment in the queue fifo size represents 256 bytes of
+ * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+ * between the queues.
+ */
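+ /* For example (illustrative sizes): a fifo_size encoding of 9 gives
+ * 1 << 16 = 65536 bytes; split across 8 queues (and assuming this stays
+ * under the XLGMAC_MAX_FIFO clamp) that is 8192 bytes each, so p_fifo
+ * is programmed as 8192 / 256 - 1 = 31.
+ */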
+ p_fifo = q_fifo_size / 256;
+ if (p_fifo)
+ p_fifo--;
+
+ return p_fifo;
+}
+
+static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int i;
+ u32 regval;
+
+ fifo_size = xlgmac_calculate_per_queue_fifo(
+ pdata->hw_feat.tx_fifo_size,
+ pdata->tx_q_count);
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
+ MTL_Q_TQOMR_TQS_LEN, fifo_size);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int i;
+ u32 regval;
+
+ fifo_size = xlgmac_calculate_per_queue_fifo(
+ pdata->hw_feat.rx_fifo_size,
+ pdata->rx_q_count);
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
+ MTL_Q_RQOMR_RQS_LEN, fifo_size);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+ /* Activate flow control when less than 4k left in fifo */
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
+ MTL_Q_RQFCR_RFA_LEN, 2);
+ /* De-activate flow control when more than 6k left in fifo */
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
+ MTL_Q_RQFCR_RFD_LEN, 4);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+ }
+}
+
+static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
+ MTL_Q_TQOMR_TTC_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
+ MTL_Q_RQOMR_RSF_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
+ MTL_Q_TQOMR_TSF_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
+ DMA_CH_TCR_OSP_LEN,
+ pdata->tx_osp_mode);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
+ DMA_CH_CR_PBLX8_LEN,
+ pdata->pblx8);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
+ regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+ DMA_CH_TCR_PBL_LEN);
+ return regval;
+}
+
+static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+ DMA_CH_TCR_PBL_LEN,
+ pdata->tx_pbl);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
+ regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+ DMA_CH_RCR_PBL_LEN);
+ return regval;
+}
+
+static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+ DMA_CH_RCR_PBL_LEN,
+ pdata->rx_pbl);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+
+ return 0;
+}
+
+static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
+
+ val = (u64)readl(pdata->mac_regs + reg_lo);
+
+ if (read_hi)
+ val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
+
+ return val;
+}
+
+static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
+{
+ unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
+ struct xlgmac_stats *stats = &pdata->stats;
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXOCTETCOUNT_GB_POS,
+ MMC_TISR_TXOCTETCOUNT_GB_LEN))
+ stats->txoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXFRAMECOUNT_GB_POS,
+ MMC_TISR_TXFRAMECOUNT_GB_LEN))
+ stats->txframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXBROADCASTFRAMES_G_POS,
+ MMC_TISR_TXBROADCASTFRAMES_G_LEN))
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXMULTICASTFRAMES_G_POS,
+ MMC_TISR_TXMULTICASTFRAMES_G_LEN))
+ stats->txmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX64OCTETS_GB_POS,
+ MMC_TISR_TX64OCTETS_GB_LEN))
+ stats->tx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX65TO127OCTETS_GB_POS,
+ MMC_TISR_TX65TO127OCTETS_GB_LEN))
+ stats->tx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX128TO255OCTETS_GB_POS,
+ MMC_TISR_TX128TO255OCTETS_GB_LEN))
+ stats->tx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX256TO511OCTETS_GB_POS,
+ MMC_TISR_TX256TO511OCTETS_GB_LEN))
+ stats->tx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX512TO1023OCTETS_GB_POS,
+ MMC_TISR_TX512TO1023OCTETS_GB_LEN))
+ stats->tx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
+ MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
+ stats->tx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXUNICASTFRAMES_GB_POS,
+ MMC_TISR_TXUNICASTFRAMES_GB_LEN))
+ stats->txunicastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXMULTICASTFRAMES_GB_POS,
+ MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
+ stats->txmulticastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXBROADCASTFRAMES_GB_POS,
+ MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXUNDERFLOWERROR_POS,
+ MMC_TISR_TXUNDERFLOWERROR_LEN))
+ stats->txunderflowerror +=
+ xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXOCTETCOUNT_G_POS,
+ MMC_TISR_TXOCTETCOUNT_G_LEN))
+ stats->txoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXFRAMECOUNT_G_POS,
+ MMC_TISR_TXFRAMECOUNT_G_LEN))
+ stats->txframecount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXPAUSEFRAMES_POS,
+ MMC_TISR_TXPAUSEFRAMES_LEN))
+ stats->txpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXVLANFRAMES_G_POS,
+ MMC_TISR_TXVLANFRAMES_G_LEN))
+ stats->txvlanframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
+{
+ unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
+ struct xlgmac_stats *stats = &pdata->stats;
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXFRAMECOUNT_GB_POS,
+ MMC_RISR_RXFRAMECOUNT_GB_LEN))
+ stats->rxframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOCTETCOUNT_GB_POS,
+ MMC_RISR_RXOCTETCOUNT_GB_LEN))
+ stats->rxoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOCTETCOUNT_G_POS,
+ MMC_RISR_RXOCTETCOUNT_G_LEN))
+ stats->rxoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXBROADCASTFRAMES_G_POS,
+ MMC_RISR_RXBROADCASTFRAMES_G_LEN))
+ stats->rxbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXMULTICASTFRAMES_G_POS,
+ MMC_RISR_RXMULTICASTFRAMES_G_LEN))
+ stats->rxmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXCRCERROR_POS,
+ MMC_RISR_RXCRCERROR_LEN))
+ stats->rxcrcerror +=
+ xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXRUNTERROR_POS,
+ MMC_RISR_RXRUNTERROR_LEN))
+ stats->rxrunterror +=
+ xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXJABBERERROR_POS,
+ MMC_RISR_RXJABBERERROR_LEN))
+ stats->rxjabbererror +=
+ xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXUNDERSIZE_G_POS,
+ MMC_RISR_RXUNDERSIZE_G_LEN))
+ stats->rxundersize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOVERSIZE_G_POS,
+ MMC_RISR_RXOVERSIZE_G_LEN))
+ stats->rxoversize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX64OCTETS_GB_POS,
+ MMC_RISR_RX64OCTETS_GB_LEN))
+ stats->rx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX65TO127OCTETS_GB_POS,
+ MMC_RISR_RX65TO127OCTETS_GB_LEN))
+ stats->rx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX128TO255OCTETS_GB_POS,
+ MMC_RISR_RX128TO255OCTETS_GB_LEN))
+ stats->rx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX256TO511OCTETS_GB_POS,
+ MMC_RISR_RX256TO511OCTETS_GB_LEN))
+ stats->rx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX512TO1023OCTETS_GB_POS,
+ MMC_RISR_RX512TO1023OCTETS_GB_LEN))
+ stats->rx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
+ MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
+ stats->rx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXUNICASTFRAMES_G_POS,
+ MMC_RISR_RXUNICASTFRAMES_G_LEN))
+ stats->rxunicastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXLENGTHERROR_POS,
+ MMC_RISR_RXLENGTHERROR_LEN))
+ stats->rxlengtherror +=
+ xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOUTOFRANGETYPE_POS,
+ MMC_RISR_RXOUTOFRANGETYPE_LEN))
+ stats->rxoutofrangetype +=
+ xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXPAUSEFRAMES_POS,
+ MMC_RISR_RXPAUSEFRAMES_LEN))
+ stats->rxpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXFIFOOVERFLOW_POS,
+ MMC_RISR_RXFIFOOVERFLOW_LEN))
+ stats->rxfifooverflow +=
+ xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXVLANFRAMES_GB_POS,
+ MMC_RISR_RXVLANFRAMES_GB_LEN))
+ stats->rxvlanframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXWATCHDOGERROR_POS,
+ MMC_RISR_RXWATCHDOGERROR_LEN))
+ stats->rxwatchdogerror +=
+ xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_stats *stats = &pdata->stats;
+ u32 regval;
+
+ /* Freeze counters */
+ regval = readl(pdata->mac_regs + MMC_CR);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+ MMC_CR_MCF_LEN, 1);
+ writel(regval, pdata->mac_regs + MMC_CR);
+
+ stats->txoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ regval = readl(pdata->mac_regs + MMC_CR);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+ MMC_CR_MCF_LEN, 0);
+ writel(regval, pdata->mac_regs + MMC_CR);
+}
+
+static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MMC_CR);
+ /* Set counters to reset on read */
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
+ MMC_CR_ROR_LEN, 1);
+ /* Reset the counters */
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
+ MMC_CR_CR_LEN, 1);
+ writel(regval, pdata->mac_regs + MMC_CR);
+}
+
+static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+ u32 regval;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+ MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
+ if (regval) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ writel(val, pdata->mac_regs + MAC_RSSDR);
+
+ regval = readl(pdata->mac_regs + MAC_RSSAR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
+ MAC_RSSAR_RSSIA_LEN, index);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
+ MAC_RSSAR_ADDRT_LEN, type);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
+ MAC_RSSAR_CT_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
+ MAC_RSSAR_OB_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RSSAR);
+
+ wait = 1000;
+ while (wait--) {
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+ MAC_RSSAR_OB_POS,
+ MAC_RSSAR_OB_LEN);
+ if (!regval)
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xlgmac_write_rss_reg(pdata,
+ XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xlgmac_write_rss_hash_key(pdata);
+}
+
+static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+ u32 tval;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ tval = table[i];
+ pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+ pdata->rss_table[i],
+ MAC_RSSDR_DMCH_POS,
+ MAC_RSSDR_DMCH_LEN,
+ tval);
+ }
+
+ return xlgmac_write_rss_lookup_table(pdata);
+}
+
+static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xlgmac_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xlgmac_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
+
+ /* Enable RSS */
+ regval = readl(pdata->mac_regs + MAC_RSSCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+ MAC_RSSCR_RSSE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ regval = readl(pdata->mac_regs + MAC_RSSCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+ MAC_RSSCR_RSSE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+ return 0;
+}
+
+static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xlgmac_enable_rss(pdata);
+ else
+ ret = xlgmac_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
+static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int dma_ch_isr, dma_ch_ier;
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+ /* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_NIE_POS,
+ DMA_CH_IER_NIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_AIE_POS,
+ DMA_CH_IER_AIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ if (!pdata->per_channel_irq)
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN,
+ 1);
+ }
+ if (channel->rx_ring) {
+ /* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN,
+ 1);
+ if (!pdata->per_channel_irq)
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN,
+ 1);
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+ }
+}
+
+static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int q_count, i;
+ unsigned int mtl_q_isr;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+ writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+
+ /* No MTL interrupts to be enabled */
+ writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
+ }
+}
+
+static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int mac_ier = 0;
+ u32 regval;
+
+ /* Enable Timestamp interrupt */
+ mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
+ MAC_IER_TSIE_LEN, 1);
+
+ writel(mac_ier, pdata->mac_regs + MAC_IER);
+
+ /* Enable all counter interrupts */
+ regval = readl(pdata->mac_regs + MMC_RIER);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
+ MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+ writel(regval, pdata->mac_regs + MMC_RIER);
+ regval = readl(pdata->mac_regs + MMC_TIER);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
+ MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+ writel(regval, pdata->mac_regs + MMC_TIER);
+}
+
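+/* As used by the four helpers below, the MAC_TCR SS field selects the
+ * XLGMII speed: 0 = 40G, 0x1 = 25G, 0x2 = 50G, 0x3 = 100G.
+ */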
+static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x1)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x1);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x2)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x2);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x3)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x3);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
+{
+ switch (pdata->phy_speed) {
+ case SPEED_100000:
+ xlgmac_set_xlgmii_100000_speed(pdata);
+ break;
+
+ case SPEED_50000:
+ xlgmac_set_xlgmii_50000_speed(pdata);
+ break;
+
+ case SPEED_40000:
+ xlgmac_set_xlgmii_40000_speed(pdata);
+ break;
+
+ case SPEED_25000:
+ xlgmac_set_xlgmii_25000_speed(pdata);
+ break;
+ }
+}
+
+static int xlgmac_dev_read(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int err, etlt, l34t;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ dma_desc = desc_data->dma_desc;
+ pkt_info = &ring->pkt_info;
+
+ /* Check for data availability */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_OWN_POS,
+ RX_NORMAL_DESC3_OWN_LEN))
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ dma_rmb();
+
+ if (netif_msg_rx_status(pdata))
+ xlgmac_dump_rx_desc(pdata, ring, ring->cur);
+
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_CTXT_POS,
+ RX_NORMAL_DESC3_CTXT_LEN)) {
+ /* Timestamp Context Descriptor */
+ xlgmac_get_rx_tstamp(pkt_info, dma_desc);
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+ 1);
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+ 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+ 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_CDA_POS,
+ RX_NORMAL_DESC3_CDA_LEN))
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+ 1);
+
+ /* Get the header length */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_FD_POS,
+ RX_NORMAL_DESC3_FD_LEN)) {
+ desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
+ RX_NORMAL_DESC2_HL_POS,
+ RX_NORMAL_DESC2_HL_LEN);
+ if (desc_data->rx.hdr_len)
+ pdata->stats.rx_split_header_packets++;
+ }
+
+ /* Get the RSS hash */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_RSV_POS,
+ RX_NORMAL_DESC3_RSV_LEN)) {
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
+ 1);
+
+ pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);
+
+ l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_L34T_POS,
+ RX_NORMAL_DESC3_L34T_LEN);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+ /* Get the pkt_info length */
+ desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_PL_POS,
+ RX_NORMAL_DESC3_PL_LEN);
+
+ if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_LD_POS,
+ RX_NORMAL_DESC3_LD_LEN)) {
+ /* Not all the data has been transferred for this pkt_info */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+ 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this pkt_info */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+ 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (netdev->features & NETIF_F_RXCSUM)
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+ 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_ES_POS,
+ RX_NORMAL_DESC3_ES_LEN);
+ etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_ETLT_POS,
+ RX_NORMAL_DESC3_ETLT_LEN);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+ 1);
+ pkt_info->vlan_ctag =
+ XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
+ RX_NORMAL_DESC0_OVT_POS,
+ RX_NORMAL_DESC0_OVT_LEN);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ pkt_info->vlan_ctag);
+ }
+ } else {
+ if ((etlt == 0x05) || (etlt == 0x06))
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+ 0);
+ else
+ pkt_info->errors = XLGMAC_SET_REG_BITS(
+ pkt_info->errors,
+ RX_PACKET_ERRORS_FRAME_POS,
+ RX_PACKET_ERRORS_FRAME_LEN,
+ 1);
+ }
+
+ XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->dma_desc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xlgmac_enable_int(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ switch (int_id) {
+ case XLGMAC_INT_DMA_CH_SR_TI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TXSE_POS,
+ DMA_CH_IER_TXSE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TBUE_POS,
+ DMA_CH_IER_TBUE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RSE_POS,
+ DMA_CH_IER_RSE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TI_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_FBE:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_ALL:
+ dma_ch_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ return 0;
+}
+
+static int xlgmac_disable_int(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ switch (int_id) {
+ case XLGMAC_INT_DMA_CH_SR_TI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TXSE_POS,
+ DMA_CH_IER_TXSE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TBUE_POS,
+ DMA_CH_IER_TBUE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RSE_POS,
+ DMA_CH_IER_RSE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TI_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 0);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_FBE:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_ALL:
+ channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
+ dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ return 0;
+}
+
+static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
+{
+ unsigned int i, count;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+ MTL_Q_TQOMR_FTQ_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+	/* Poll until the flush of each Tx queue completes */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+ MTL_Q_TQOMR_FTQ_LEN);
+ while (--count && regval)
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + DMA_SBMR);
+ /* Set enhanced addressing mode */
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
+ DMA_SBMR_EAME_LEN, 1);
+ /* Set the System Bus mode */
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
+ DMA_SBMR_UNDEF_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
+ DMA_SBMR_BLEN_256_LEN, 1);
+ writel(regval, pdata->mac_regs + DMA_SBMR);
+}
+
+static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ int ret;
+
+ /* Flush Tx queues */
+ ret = xlgmac_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /* Initialize DMA related features */
+ xlgmac_config_dma_bus(pdata);
+ xlgmac_config_osp_mode(pdata);
+ xlgmac_config_pblx8(pdata);
+ xlgmac_config_tx_pbl_val(pdata);
+ xlgmac_config_rx_pbl_val(pdata);
+ xlgmac_config_rx_coalesce(pdata);
+ xlgmac_config_tx_coalesce(pdata);
+ xlgmac_config_rx_buffer_size(pdata);
+ xlgmac_config_tso_mode(pdata);
+ xlgmac_config_sph_mode(pdata);
+ xlgmac_config_rss(pdata);
+ desc_ops->tx_desc_init(pdata);
+ desc_ops->rx_desc_init(pdata);
+ xlgmac_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ xlgmac_config_mtl_mode(pdata);
+ xlgmac_config_queue_mapping(pdata);
+ xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
+ xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
+ xlgmac_config_tx_fifo_size(pdata);
+ xlgmac_config_rx_fifo_size(pdata);
+ xlgmac_config_flow_control_threshold(pdata);
+ xlgmac_config_rx_fep_enable(pdata);
+ xlgmac_config_rx_fup_enable(pdata);
+ xlgmac_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ xlgmac_config_mac_address(pdata);
+ xlgmac_config_rx_mode(pdata);
+ xlgmac_config_jumbo_enable(pdata);
+ xlgmac_config_flow_control(pdata);
+ xlgmac_config_mac_speed(pdata);
+ xlgmac_config_checksum_offload(pdata);
+ xlgmac_config_vlan_support(pdata);
+ xlgmac_config_mmc(pdata);
+ xlgmac_enable_mac_interrupts(pdata);
+
+ return 0;
+}
+
+static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
+{
+ unsigned int count = 2000;
+ u32 regval;
+
+ /* Issue a software reset */
+ regval = readl(pdata->mac_regs + DMA_MR);
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
+ DMA_MR_SWR_LEN, 1);
+ writel(regval, pdata->mac_regs + DMA_MR);
+ usleep_range(10, 15);
+
+	/* Poll until the software reset completes */
+ while (--count &&
+ XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
+ DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
+{
+ hw_ops->init = xlgmac_hw_init;
+ hw_ops->exit = xlgmac_hw_exit;
+
+ hw_ops->tx_complete = xlgmac_tx_complete;
+
+ hw_ops->enable_tx = xlgmac_enable_tx;
+ hw_ops->disable_tx = xlgmac_disable_tx;
+ hw_ops->enable_rx = xlgmac_enable_rx;
+ hw_ops->disable_rx = xlgmac_disable_rx;
+
+ hw_ops->dev_xmit = xlgmac_dev_xmit;
+ hw_ops->dev_read = xlgmac_dev_read;
+ hw_ops->enable_int = xlgmac_enable_int;
+ hw_ops->disable_int = xlgmac_disable_int;
+
+ hw_ops->set_mac_address = xlgmac_set_mac_address;
+ hw_ops->config_rx_mode = xlgmac_config_rx_mode;
+ hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
+ hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;
+
+ /* For MII speed configuration */
+ hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
+ hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
+ hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
+ hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;
+
+ /* For descriptor related operation */
+ hw_ops->tx_desc_init = xlgmac_tx_desc_init;
+ hw_ops->rx_desc_init = xlgmac_rx_desc_init;
+ hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
+ hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
+ hw_ops->is_last_desc = xlgmac_is_last_desc;
+ hw_ops->is_context_desc = xlgmac_is_context_desc;
+ hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;
+
+ /* For Flow Control */
+ hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
+ hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;
+
+ /* For Vlan related config */
+ hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
+ hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
+ hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
+ hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
+ hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;
+
+ /* For RX coalescing */
+ hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
+ hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
+ hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
+ hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
+ hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
+ hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_ops->config_osp_mode = xlgmac_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
+ hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
+ hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
+ hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
+ hw_ops->config_pblx8 = xlgmac_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
+ hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
+ hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;
+
+ /* For Receive Side Scaling */
+ hw_ops->enable_rss = xlgmac_enable_rss;
+ hw_ops->disable_rss = xlgmac_disable_rss;
+ hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
+ hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
+}
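
The hardware ops above touch registers almost exclusively through the XLGMAC_GET_REG_BITS()/XLGMAC_SET_REG_BITS() helpers, whose definitions live in dwc-xlgmac.h and are not part of this hunk. Below is a minimal sketch of the usual mask/shift form such helpers take, assuming they accept a register value, a bit position and a field width; the names are stand-ins for illustration, not the driver's macros.

#include <linux/types.h>

#define SKETCH_FIELD_MASK(pos, len)	(((1U << (len)) - 1) << (pos))

/* Extract a 'len'-bit field starting at bit 'pos' from 'var' */
static inline u32 sketch_get_reg_bits(u32 var, unsigned int pos,
				      unsigned int len)
{
	return (var & SKETCH_FIELD_MASK(pos, len)) >> pos;
}

/* Return 'var' with the field at 'pos'/'len' replaced by 'val' */
static inline u32 sketch_set_reg_bits(u32 var, unsigned int pos,
				      unsigned int len, u32 val)
{
	return (var & ~SKETCH_FIELD_MASK(pos, len)) |
	       ((val << pos) & SKETCH_FIELD_MASK(pos, len));
}

With helpers of this shape, the read-modify-write sequences in xlgmac_enable_int()/xlgmac_disable_int() reduce to a readl(), one or two field updates and a writel() of the result.
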
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
new file mode 100644
index 000000000000..5e8428be3d66
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -0,0 +1,1334 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_one_poll(struct napi_struct *, int);
+static int xlgmac_all_poll(struct napi_struct *, int);
+
+static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
+{
+ return (ring->dma_desc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
+
+static int xlgmac_maybe_stop_tx_queue(
+ struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring,
+ unsigned int count)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+
+ if (count > xlgmac_tx_avail_desc(ring)) {
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_ops.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
+static void xlgmac_prep_vlan(struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ if (skb_vlan_tag_present(skb))
+ pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xlgmac_prep_tso(struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ int ret;
+
+ if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->tcp_header_len = tcp_hdrlen(skb);
+ pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
+ pkt_info->mss = skb_shinfo(skb)->gso_size;
+
+ XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
+ XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
+ pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
+ XLGMAC_PR("mss=%u\n", pkt_info->mss);
+
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
+ pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
+
+ return 0;
+}
+
+static int xlgmac_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ return 1;
+}
+
+static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ pkt_info->skb = skb;
+
+ context_desc = 0;
+ pkt_info->desc_count = 0;
+
+ pkt_info->tx_packets = 1;
+ pkt_info->tx_bytes = skb->len;
+
+ if (xlgmac_is_tso(skb)) {
+ /* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ pkt_info->desc_count++;
+ }
+
+ /* TSO requires an extra descriptor for TSO header */
+ pkt_info->desc_count++;
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
+ 1);
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+ 1);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		pkt_info->attributes = XLGMAC_SET_REG_BITS(
+					pkt_info->attributes,
+					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+					1);
+	}
+
+ if (skb_vlan_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ pkt_info->desc_count++;
+ }
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+ 1);
+ }
+
+ for (len = skb_headlen(skb); len;) {
+ pkt_info->desc_count++;
+ len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ pkt_info->desc_count++;
+ len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
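+	/* Round the buffer size up to the next XLGMAC_RX_BUF_ALIGN boundary */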
+ rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
+ ~(XLGMAC_RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
+
+static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_channel *channel;
+ enum xlgmac_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_ops->enable_int(channel, int_id);
+ }
+}
+
+static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_channel *channel;
+ enum xlgmac_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_ops->disable_int(channel, int_id);
+ }
+}
+
+static irqreturn_t xlgmac_isr(int irq, void *data)
+{
+ unsigned int dma_isr, dma_ch_isr, mac_isr;
+ struct xlgmac_pdata *pdata = data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_hw_ops *hw_ops;
+ unsigned int i, ti, ri;
+
+ hw_ops = &pdata->hw_ops;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts. So for polling mode, we just need to check for
+ * this register to be non-zero
+ */
+ dma_isr = readl(pdata->mac_regs + DMA_ISR);
+ if (!dma_isr)
+ return IRQ_HANDLED;
+
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel_head + i;
+
+ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
+
+		/* The TI or RI interrupt bits may still be set even if
+		 * per channel DMA interrupts are in use. Make sure per
+		 * channel interrupts are not enabled before using the
+		 * private data napi structure.
+		 */
+ ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
+ DMA_CH_SR_TI_LEN);
+ ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
+ DMA_CH_SR_RI_LEN);
+ if (!pdata->per_channel_irq && (ti || ri)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xlgmac_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule_irqoff(&pdata->napi);
+ }
+ }
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
+ DMA_CH_SR_RBU_LEN))
+ pdata->stats.rx_buffer_unavailable++;
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
+ DMA_CH_SR_FBE_LEN))
+ schedule_work(&pdata->restart_work);
+
+ /* Clear all interrupt signals */
+ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ }
+
+ if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
+ DMA_ISR_MACIS_LEN)) {
+ mac_isr = readl(pdata->mac_regs + MAC_ISR);
+
+ if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
+ MAC_ISR_MMCTXIS_LEN))
+ hw_ops->tx_mmc_int(pdata);
+
+ if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
+ MAC_ISR_MMCRXIS_LEN))
+ hw_ops->rx_mmc_int(pdata);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xlgmac_dma_isr(int irq, void *data)
+{
+ struct xlgmac_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule_irqoff(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void xlgmac_tx_timer(unsigned long data)
+{
+ struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+ disable_irq_nosync(channel->dma_irq);
+ else
+ xlgmac_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(napi);
+ }
+
+ channel->tx_timer_active = 0;
+}
+
+static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ setup_timer(&channel->tx_timer, xlgmac_tx_timer,
+ (unsigned long)channel);
+ }
+}
+
+static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ del_timer_sync(&channel->tx_timer);
+ }
+}
+
+static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xlgmac_one_poll,
+ NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xlgmac_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
+}
+
+static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
+
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
+}
+
+static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
+ IRQF_SHARED, netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xlgmac_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
+static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ }
+ }
+}
+
+static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ }
+ }
+}
+
+static int xlgmac_start(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ hw_ops->init(pdata);
+ xlgmac_napi_enable(pdata, 1);
+
+ ret = xlgmac_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
+ hw_ops->enable_tx(pdata);
+ hw_ops->enable_rx(pdata);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+
+err_napi:
+ xlgmac_napi_disable(pdata, 1);
+ hw_ops->exit(pdata);
+
+ return ret;
+}
+
+static void xlgmac_stop(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_channel *channel;
+ struct netdev_queue *txq;
+ unsigned int i;
+
+ netif_tx_stop_all_queues(netdev);
+ xlgmac_stop_timers(pdata);
+ hw_ops->disable_tx(pdata);
+ hw_ops->disable_rx(pdata);
+ xlgmac_free_irqs(pdata);
+ xlgmac_napi_disable(pdata, 1);
+ hw_ops->exit(pdata);
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+}
+
+static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
+{
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xlgmac_stop(pdata);
+
+ xlgmac_free_tx_data(pdata);
+ xlgmac_free_rx_data(pdata);
+
+ xlgmac_start(pdata);
+}
+
+static void xlgmac_restart(struct work_struct *work)
+{
+ struct xlgmac_pdata *pdata = container_of(work,
+ struct xlgmac_pdata,
+ restart_work);
+
+ rtnl_lock();
+
+ xlgmac_restart_dev(pdata);
+
+ rtnl_unlock();
+}
+
+static int xlgmac_open(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_desc_ops *desc_ops;
+ int ret;
+
+ desc_ops = &pdata->desc_ops;
+
+ /* TODO: Initialize the phy */
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ return ret;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the channels and rings */
+ ret = desc_ops->alloc_channles_and_rings(pdata);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&pdata->restart_work, xlgmac_restart);
+ xlgmac_init_timers(pdata);
+
+ ret = xlgmac_start(pdata);
+ if (ret)
+ goto err_channels_and_rings;
+
+ return 0;
+
+err_channels_and_rings:
+ desc_ops->free_channels_and_rings(pdata);
+
+ return ret;
+}
+
+static int xlgmac_close(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_desc_ops *desc_ops;
+
+ desc_ops = &pdata->desc_ops;
+
+ /* Stop the device */
+ xlgmac_stop(pdata);
+
+ /* Free the channels and rings */
+ desc_ops->free_channels_and_rings(pdata);
+
+ return 0;
+}
+
+static void xlgmac_tx_timeout(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ netdev_warn(netdev, "tx timeout, device restarting\n");
+ schedule_work(&pdata->restart_work);
+}
+
+static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_pkt_info *tx_pkt_info;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_channel *channel;
+ struct xlgmac_hw_ops *hw_ops;
+ struct netdev_queue *txq;
+ struct xlgmac_ring *ring;
+ int ret;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ XLGMAC_PR("skb->len = %d\n", skb->len);
+
+ channel = pdata->channel_head + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ tx_pkt_info = &ring->pkt_info;
+
+ if (skb->len == 0) {
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Prepare preliminary packet info for TX */
+ memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
+ xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
+
+ /* Check that there are enough descriptors available */
+ ret = xlgmac_maybe_stop_tx_queue(channel, ring,
+ tx_pkt_info->desc_count);
+ if (ret)
+ return ret;
+
+ ret = xlgmac_prep_tso(skb, tx_pkt_info);
+ if (ret) {
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ xlgmac_prep_vlan(skb, tx_pkt_info);
+
+ if (!desc_ops->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
+
+ /* Configure required descriptor fields for transmission */
+ hw_ops->dev_xmit(channel);
+
+ if (netif_msg_pktdata(pdata))
+ xlgmac_print_pkt(netdev, skb, true);
+
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
+
+ return NETDEV_TX_OK;
+}
+
+static void xlgmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_stats *pstats = &pdata->stats;
+
+ pdata->hw_ops.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+}
+
+static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_ops->set_mac_address(pdata, netdev->dev_addr);
+
+ return 0;
+}
+
+static int xlgmac_ioctl(struct net_device *netdev,
+ struct ifreq *ifreq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ int ret;
+
+ ret = xlgmac_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xlgmac_restart_dev(pdata);
+
+ return 0;
+}
+
+static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto,
+ u16 vid)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ set_bit(vid, pdata->active_vlans);
+ hw_ops->update_vlan_hash_table(pdata);
+
+ return 0;
+}
+
+static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto,
+ u16 vid)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_ops->update_vlan_hash_table(pdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xlgmac_poll_controller(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xlgmac_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xlgmac_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int xlgmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_ops->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_ops->disable_rss(pdata);
+ if (ret)
+ return ret;
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ hw_ops->enable_rx_csum(pdata);
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ hw_ops->disable_rx_csum(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_ops->enable_rx_vlan_stripping(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+ hw_ops->disable_rx_vlan_stripping(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_ops->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_ops->disable_rx_vlan_filtering(pdata);
+
+ pdata->netdev_features = features;
+
+ return 0;
+}
+
+static void xlgmac_set_rx_mode(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ hw_ops->config_rx_mode(pdata);
+}
+
+static const struct net_device_ops xlgmac_netdev_ops = {
+ .ndo_open = xlgmac_open,
+ .ndo_stop = xlgmac_close,
+ .ndo_start_xmit = xlgmac_xmit,
+ .ndo_tx_timeout = xlgmac_tx_timeout,
+ .ndo_get_stats64 = xlgmac_get_stats64,
+ .ndo_change_mtu = xlgmac_change_mtu,
+ .ndo_set_mac_address = xlgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xlgmac_ioctl,
+ .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xlgmac_poll_controller,
+#endif
+ .ndo_set_features = xlgmac_set_features,
+ .ndo_set_rx_mode = xlgmac_set_rx_mode,
+};
+
+const struct net_device_ops *xlgmac_get_netdev_ops(void)
+{
+ return &xlgmac_netdev_ops;
+}
+
+static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_hw_ops *hw_ops;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ while (ring->dirty != ring->cur) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset desc_data values */
+ desc_ops->unmap_desc_data(pdata, desc_data);
+
+ if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
+ break;
+
+ hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
+
+ ring->dirty++;
+ }
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Update the Rx Tail Pointer Register with address of
+ * the last cleaned entry
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
+ struct napi_struct *napi,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int len)
+{
+ unsigned int copy_len;
+ struct sk_buff *skb;
+ u8 *packet;
+
+ skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ /* Start with the header buffer which may contain just the header
+ * or the header plus data
+ */
+ dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
+ desc_data->rx.hdr.dma_off,
+ desc_data->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ packet = page_address(desc_data->rx.hdr.pa.pages) +
+ desc_data->rx.hdr.pa.pages_offset;
+ copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
+ copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ len -= copy_len;
+ if (len) {
+ /* Add the remaining data as a frag */
+ dma_sync_single_range_for_cpu(pdata->dev,
+ desc_data->rx.buf.dma_base,
+ desc_data->rx.buf.dma_off,
+ desc_data->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ desc_data->rx.buf.pa.pages,
+ desc_data->rx.buf.pa.pages_offset,
+ len, desc_data->rx.buf.dma_len);
+ desc_data->rx.buf.pa.pages = NULL;
+ }
+
+ return skb;
+}
+
+static int xlgmac_tx_poll(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_hw_ops *hw_ops;
+ struct netdev_queue *txq;
+ int processed = 0;
+ unsigned int cur;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ cur = ring->cur;
+
+ /* Be sure we get ring->cur before accessing descriptor data */
+ smp_rmb();
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+ while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
+ (ring->dirty != cur)) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+ dma_desc = desc_data->dma_desc;
+
+ if (!hw_ops->tx_complete(dma_desc))
+ break;
+
+ /* Make sure descriptor fields are read after reading
+ * the OWN bit
+ */
+ dma_rmb();
+
+ if (netif_msg_tx_done(pdata))
+ xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
+
+ if (hw_ops->is_last_desc(dma_desc)) {
+ tx_packets += desc_data->tx.packets;
+ tx_bytes += desc_data->tx.bytes;
+ }
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ hw_ops->tx_desc_reset(desc_data);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if (!processed)
+ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_tx_wake_queue(txq);
+ }
+
+ XLGMAC_PR("processed=%d\n", processed);
+
+ return processed;
+}
+
+static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int len, dma_desc_len, max_len;
+ unsigned int context_next, context;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int incomplete, error;
+ struct xlgmac_hw_ops *hw_ops;
+ unsigned int received = 0;
+ struct napi_struct *napi;
+ struct sk_buff *skb;
+ int packet_count = 0;
+
+ hw_ops = &pdata->hw_ops;
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ incomplete = 0;
+ context_next = 0;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ pkt_info = &ring->pkt_info;
+ while (packet_count < budget) {
+		/* On the first pass through the loop, see whether there
+		 * is saved state to restore
+		 */
+ if (!received && desc_data->state_saved) {
+ skb = desc_data->state.skb;
+ error = desc_data->state.error;
+ len = desc_data->state.len;
+ } else {
+ memset(pkt_info, 0, sizeof(*pkt_info));
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+
+ if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
+ xlgmac_rx_refresh(channel);
+
+ if (hw_ops->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+
+ incomplete = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
+ context_next = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
+ context = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
+
+ /* Earlier error, just drain the remaining data */
+ if ((incomplete || context_next) && error)
+ goto read_again;
+
+ if (error || pkt_info->errors) {
+ if (pkt_info->errors)
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (!context) {
+ /* Length is cumulative, get this descriptor's length */
+ dma_desc_len = desc_data->rx.len - len;
+ len += dma_desc_len;
+
+ if (dma_desc_len && !skb) {
+ skb = xlgmac_create_skb(pdata, napi, desc_data,
+ dma_desc_len);
+ if (!skb)
+ error = 1;
+ } else if (dma_desc_len) {
+ dma_sync_single_range_for_cpu(
+ pdata->dev,
+ desc_data->rx.buf.dma_base,
+ desc_data->rx.buf.dma_off,
+ desc_data->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(
+ skb, skb_shinfo(skb)->nr_frags,
+ desc_data->rx.buf.pa.pages,
+ desc_data->rx.buf.pa.pages_offset,
+ dma_desc_len,
+ desc_data->rx.buf.dma_len);
+ desc_data->rx.buf.pa.pages = NULL;
+ }
+ }
+
+ if (incomplete || context_next)
+ goto read_again;
+
+ if (!skb)
+ goto next_packet;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (netif_msg_pktdata(pdata))
+ xlgmac_print_pkt(netdev, skb, false);
+
+ skb_checksum_none_assert(skb);
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ pkt_info->vlan_ctag);
+
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
+ skb_set_hash(skb, pkt_info->rss_hash,
+ pkt_info->rss_hash_type);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+
+ napi_gro_receive(napi, skb);
+
+next_packet:
+ packet_count++;
+ }
+
+ /* Check if we need to save state before leaving */
+ if (received && (incomplete || context_next)) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ desc_data->state_saved = 1;
+ desc_data->state.skb = skb;
+ desc_data->state.len = len;
+ desc_data->state.error = error;
+ }
+
+ XLGMAC_PR("packet_count = %d\n", packet_count);
+
+ return packet_count;
+}
+
+static int xlgmac_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xlgmac_channel *channel = container_of(napi,
+ struct xlgmac_channel,
+ napi);
+ int processed = 0;
+
+ XLGMAC_PR("budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xlgmac_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xlgmac_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ XLGMAC_PR("received = %d\n", processed);
+
+ return processed;
+}
+
+static int xlgmac_all_poll(struct napi_struct *napi, int budget)
+{
+ struct xlgmac_pdata *pdata = container_of(napi,
+ struct xlgmac_pdata,
+ napi);
+ struct xlgmac_channel *channel;
+ int processed, last_processed;
+ int ring_budget;
+ unsigned int i;
+
+ XLGMAC_PR("budget=%d\n", budget);
+
+ processed = 0;
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Cleanup Tx ring first */
+ xlgmac_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xlgmac_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+ xlgmac_enable_rx_tx_ints(pdata);
+ }
+
+ XLGMAC_PR("received = %d\n", processed);
+
+ return processed;
+}
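
xlgmac_tx_avail_desc() and xlgmac_rx_dirty_desc() treat ring->cur and ring->dirty as free-running unsigned counters, so the subtractions stay correct across wraparound, and descriptor slots are derived by masking with dma_desc_count - 1, which assumes a power-of-two ring size. A small user-space sketch of the same accounting follows; the constants and names are invented for illustration only.

#include <stdio.h>

#define DESC_COUNT 8U	/* power-of-two ring size, as the driver assumes */

struct sketch_ring {
	unsigned int cur;	/* next descriptor the producer will use */
	unsigned int dirty;	/* next descriptor the consumer reclaims */
};

static unsigned int sketch_tx_avail(const struct sketch_ring *r)
{
	/* Unsigned arithmetic keeps cur - dirty valid across wraparound */
	return DESC_COUNT - (r->cur - r->dirty);
}

int main(void)
{
	struct sketch_ring r = { .cur = 0xfffffffeU, .dirty = 0xfffffffcU };

	printf("in flight: %u, available: %u\n",
	       r.cur - r.dirty, sketch_tx_avail(&r));
	printf("producer slot index: %u\n", r.cur & (DESC_COUNT - 1));
	return 0;
}

This is why xlgmac_maybe_stop_tx_queue() can compare a descriptor count directly against xlgmac_tx_avail_desc() without any modular arithmetic of its own.
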
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
new file mode 100644
index 000000000000..504e80de7bba
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -0,0 +1,80 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ struct device *dev = &pcidev->dev;
+ struct xlgmac_resources res;
+ int i, ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret) {
+ dev_err(dev, "ERROR: failed to enable device\n");
+ return ret;
+ }
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ if (pci_resource_len(pcidev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ pci_set_master(pcidev);
+
+ memset(&res, 0, sizeof(res));
+ res.irq = pcidev->irq;
+ res.addr = pcim_iomap_table(pcidev)[i];
+
+ return xlgmac_drv_probe(&pcidev->dev, &res);
+}
+
+static void xlgmac_remove(struct pci_dev *pcidev)
+{
+ xlgmac_drv_remove(&pcidev->dev);
+}
+
+static const struct pci_device_id xlgmac_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl);
+
+static struct pci_driver xlgmac_pci_driver = {
+ .name = XLGMAC_DRV_NAME,
+ .id_table = xlgmac_pci_tbl,
+ .probe = xlgmac_probe,
+ .remove = xlgmac_remove,
+};
+
+module_pci_driver(xlgmac_pci_driver);
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
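
xlgmac_probe() above maps only the first BAR that reports a non-zero length and passes its mapped address to xlgmac_drv_probe(). The same selection logic is shown below pulled out into a standalone helper purely for illustration; the function name is invented and is not part of the driver.

#include <linux/pci.h>

/* Illustrative only: return the index of the first populated BAR,
 * or -ENODEV if none has a non-zero length.
 */
static int sketch_first_populated_bar(struct pci_dev *pcidev)
{
	int i;

	for (i = 0; i <= PCI_STD_RESOURCE_END; i++)
		if (pci_resource_len(pcidev, i))
			return i;

	return -ENODEV;
}

Because the managed pcim_enable_device()/pcim_iomap_regions() helpers are used, the BAR mapping and device enable are released automatically when the device is unbound, which is why xlgmac_remove() only needs to call xlgmac_drv_remove().
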
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
new file mode 100644
index 000000000000..782448128a89
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
@@ -0,0 +1,746 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_REG_H__
+#define __DWC_XLGMAC_REG_H__
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_VR 0x0110
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_POS 18
+#define MAC_HWF0R_ADDMACADRSEL_LEN 5
+#define MAC_HWF0R_ARPOFFSEL_POS 9
+#define MAC_HWF0R_ARPOFFSEL_LEN 1
+#define MAC_HWF0R_EEESEL_POS 13
+#define MAC_HWF0R_EEESEL_LEN 1
+#define MAC_HWF0R_PHYIFSEL_POS 1
+#define MAC_HWF0R_PHYIFSEL_LEN 2
+#define MAC_HWF0R_MGKSEL_POS 7
+#define MAC_HWF0R_MGKSEL_LEN 1
+#define MAC_HWF0R_MMCSEL_POS 8
+#define MAC_HWF0R_MMCSEL_LEN 1
+#define MAC_HWF0R_RWKSEL_POS 6
+#define MAC_HWF0R_RWKSEL_LEN 1
+#define MAC_HWF0R_RXCOESEL_POS 16
+#define MAC_HWF0R_RXCOESEL_LEN 1
+#define MAC_HWF0R_SAVLANINS_POS 27
+#define MAC_HWF0R_SAVLANINS_LEN 1
+#define MAC_HWF0R_SMASEL_POS 5
+#define MAC_HWF0R_SMASEL_LEN 1
+#define MAC_HWF0R_TSSEL_POS 12
+#define MAC_HWF0R_TSSEL_LEN 1
+#define MAC_HWF0R_TSSTSSEL_POS 25
+#define MAC_HWF0R_TSSTSSEL_LEN 2
+#define MAC_HWF0R_TXCOESEL_POS 14
+#define MAC_HWF0R_TXCOESEL_LEN 1
+#define MAC_HWF0R_VLHASH_POS 4
+#define MAC_HWF0R_VLHASH_LEN 1
+#define MAC_HWF1R_ADDR64_POS 14
+#define MAC_HWF1R_ADDR64_LEN 2
+#define MAC_HWF1R_ADVTHWORD_POS 13
+#define MAC_HWF1R_ADVTHWORD_LEN 1
+#define MAC_HWF1R_DBGMEMA_POS 19
+#define MAC_HWF1R_DBGMEMA_LEN 1
+#define MAC_HWF1R_DCBEN_POS 16
+#define MAC_HWF1R_DCBEN_LEN 1
+#define MAC_HWF1R_HASHTBLSZ_POS 24
+#define MAC_HWF1R_HASHTBLSZ_LEN 3
+#define MAC_HWF1R_L3L4FNUM_POS 27
+#define MAC_HWF1R_L3L4FNUM_LEN 4
+#define MAC_HWF1R_NUMTC_POS 21
+#define MAC_HWF1R_NUMTC_LEN 3
+#define MAC_HWF1R_RSSEN_POS 20
+#define MAC_HWF1R_RSSEN_LEN 1
+#define MAC_HWF1R_RXFIFOSIZE_POS 0
+#define MAC_HWF1R_RXFIFOSIZE_LEN 5
+#define MAC_HWF1R_SPHEN_POS 17
+#define MAC_HWF1R_SPHEN_LEN 1
+#define MAC_HWF1R_TSOEN_POS 18
+#define MAC_HWF1R_TSOEN_LEN 1
+#define MAC_HWF1R_TXFIFOSIZE_POS 6
+#define MAC_HWF1R_TXFIFOSIZE_LEN 5
+#define MAC_HWF2R_AUXSNAPNUM_POS 28
+#define MAC_HWF2R_AUXSNAPNUM_LEN 3
+#define MAC_HWF2R_PPSOUTNUM_POS 24
+#define MAC_HWF2R_PPSOUTNUM_LEN 3
+#define MAC_HWF2R_RXCHCNT_POS 12
+#define MAC_HWF2R_RXCHCNT_LEN 4
+#define MAC_HWF2R_RXQCNT_POS 0
+#define MAC_HWF2R_RXQCNT_LEN 4
+#define MAC_HWF2R_TXCHCNT_POS 18
+#define MAC_HWF2R_TXCHCNT_LEN 4
+#define MAC_HWF2R_TXQCNT_POS 6
+#define MAC_HWF2R_TXQCNT_LEN 4
+#define MAC_IER_TSIE_POS 12
+#define MAC_IER_TSIE_LEN 1
+#define MAC_ISR_MMCRXIS_POS 9
+#define MAC_ISR_MMCRXIS_LEN 1
+#define MAC_ISR_MMCTXIS_POS 10
+#define MAC_ISR_MMCTXIS_LEN 1
+#define MAC_ISR_PMTIS_POS 4
+#define MAC_ISR_PMTIS_LEN 1
+#define MAC_ISR_TSIS_POS 12
+#define MAC_ISR_TSIS_LEN 1
+#define MAC_MACA1HR_AE_POS 31
+#define MAC_MACA1HR_AE_LEN 1
+#define MAC_PFR_HMC_POS 2
+#define MAC_PFR_HMC_LEN 1
+#define MAC_PFR_HPF_POS 10
+#define MAC_PFR_HPF_LEN 1
+#define MAC_PFR_HUC_POS 1
+#define MAC_PFR_HUC_LEN 1
+#define MAC_PFR_PM_POS 4
+#define MAC_PFR_PM_LEN 1
+#define MAC_PFR_PR_POS 0
+#define MAC_PFR_PR_LEN 1
+#define MAC_PFR_VTFE_POS 16
+#define MAC_PFR_VTFE_LEN 1
+#define MAC_Q0TFCR_PT_POS 16
+#define MAC_Q0TFCR_PT_LEN 16
+#define MAC_Q0TFCR_TFE_POS 1
+#define MAC_Q0TFCR_TFE_LEN 1
+#define MAC_RCR_ACS_POS 1
+#define MAC_RCR_ACS_LEN 1
+#define MAC_RCR_CST_POS 2
+#define MAC_RCR_CST_LEN 1
+#define MAC_RCR_DCRCC_POS 3
+#define MAC_RCR_DCRCC_LEN 1
+#define MAC_RCR_HDSMS_POS 12
+#define MAC_RCR_HDSMS_LEN 3
+#define MAC_RCR_IPC_POS 9
+#define MAC_RCR_IPC_LEN 1
+#define MAC_RCR_JE_POS 8
+#define MAC_RCR_JE_LEN 1
+#define MAC_RCR_LM_POS 10
+#define MAC_RCR_LM_LEN 1
+#define MAC_RCR_RE_POS 0
+#define MAC_RCR_RE_LEN 1
+#define MAC_RFCR_PFCE_POS 8
+#define MAC_RFCR_PFCE_LEN 1
+#define MAC_RFCR_RFE_POS 0
+#define MAC_RFCR_RFE_LEN 1
+#define MAC_RFCR_UP_POS 1
+#define MAC_RFCR_UP_LEN 1
+#define MAC_RQC0R_RXQ0EN_POS 0
+#define MAC_RQC0R_RXQ0EN_LEN 2
+#define MAC_RSSAR_ADDRT_POS 2
+#define MAC_RSSAR_ADDRT_LEN 1
+#define MAC_RSSAR_CT_POS 1
+#define MAC_RSSAR_CT_LEN 1
+#define MAC_RSSAR_OB_POS 0
+#define MAC_RSSAR_OB_LEN 1
+#define MAC_RSSAR_RSSIA_POS 8
+#define MAC_RSSAR_RSSIA_LEN 8
+#define MAC_RSSCR_IP2TE_POS 1
+#define MAC_RSSCR_IP2TE_LEN 1
+#define MAC_RSSCR_RSSE_POS 0
+#define MAC_RSSCR_RSSE_LEN 1
+#define MAC_RSSCR_TCP4TE_POS 2
+#define MAC_RSSCR_TCP4TE_LEN 1
+#define MAC_RSSCR_UDP4TE_POS 3
+#define MAC_RSSCR_UDP4TE_LEN 1
+#define MAC_RSSDR_DMCH_POS 0
+#define MAC_RSSDR_DMCH_LEN 4
+#define MAC_TCR_SS_POS 28
+#define MAC_TCR_SS_LEN 3
+#define MAC_TCR_TE_POS 0
+#define MAC_TCR_TE_LEN 1
+#define MAC_VLANHTR_VLHT_POS 0
+#define MAC_VLANHTR_VLHT_LEN 16
+#define MAC_VLANIR_VLTI_POS 20
+#define MAC_VLANIR_VLTI_LEN 1
+#define MAC_VLANIR_CSVL_POS 19
+#define MAC_VLANIR_CSVL_LEN 1
+#define MAC_VLANTR_DOVLTC_POS 20
+#define MAC_VLANTR_DOVLTC_LEN 1
+#define MAC_VLANTR_ERSVLM_POS 19
+#define MAC_VLANTR_ERSVLM_LEN 1
+#define MAC_VLANTR_ESVL_POS 18
+#define MAC_VLANTR_ESVL_LEN 1
+#define MAC_VLANTR_ETV_POS 16
+#define MAC_VLANTR_ETV_LEN 1
+#define MAC_VLANTR_EVLS_POS 21
+#define MAC_VLANTR_EVLS_LEN 2
+#define MAC_VLANTR_EVLRXS_POS 24
+#define MAC_VLANTR_EVLRXS_LEN 1
+#define MAC_VLANTR_VL_POS 0
+#define MAC_VLANTR_VL_LEN 16
+#define MAC_VLANTR_VTHM_POS 25
+#define MAC_VLANTR_VTHM_LEN 1
+#define MAC_VLANTR_VTIM_POS 17
+#define MAC_VLANTR_VTIM_LEN 1
+#define MAC_VR_DEVID_POS 8
+#define MAC_VR_DEVID_LEN 8
+#define MAC_VR_SNPSVER_POS 0
+#define MAC_VR_SNPSVER_LEN 8
+#define MAC_VR_USERVER_POS 16
+#define MAC_VR_USERVER_LEN 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_POS 0
+#define MMC_CR_CR_LEN 1
+#define MMC_CR_CSR_POS 1
+#define MMC_CR_CSR_LEN 1
+#define MMC_CR_ROR_POS 2
+#define MMC_CR_ROR_LEN 1
+#define MMC_CR_MCF_POS 3
+#define MMC_CR_MCF_LEN 1
+#define MMC_CR_MCT_POS 4
+#define MMC_CR_MCT_LEN 2
+#define MMC_RIER_ALL_INTERRUPTS_POS 0
+#define MMC_RIER_ALL_INTERRUPTS_LEN 23
+#define MMC_RISR_RXFRAMECOUNT_GB_POS 0
+#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1
+#define MMC_RISR_RXOCTETCOUNT_GB_POS 1
+#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1
+#define MMC_RISR_RXOCTETCOUNT_G_POS 2
+#define MMC_RISR_RXOCTETCOUNT_G_LEN 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1
+#define MMC_RISR_RXCRCERROR_POS 5
+#define MMC_RISR_RXCRCERROR_LEN 1
+#define MMC_RISR_RXRUNTERROR_POS 6
+#define MMC_RISR_RXRUNTERROR_LEN 1
+#define MMC_RISR_RXJABBERERROR_POS 7
+#define MMC_RISR_RXJABBERERROR_LEN 1
+#define MMC_RISR_RXUNDERSIZE_G_POS 8
+#define MMC_RISR_RXUNDERSIZE_G_LEN 1
+#define MMC_RISR_RXOVERSIZE_G_POS 9
+#define MMC_RISR_RXOVERSIZE_G_LEN 1
+#define MMC_RISR_RX64OCTETS_GB_POS 10
+#define MMC_RISR_RX64OCTETS_GB_LEN 1
+#define MMC_RISR_RX65TO127OCTETS_GB_POS 11
+#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1
+#define MMC_RISR_RX128TO255OCTETS_GB_POS 12
+#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1
+#define MMC_RISR_RX256TO511OCTETS_GB_POS 13
+#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_POS 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1
+#define MMC_RISR_RXUNICASTFRAMES_G_POS 16
+#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1
+#define MMC_RISR_RXLENGTHERROR_POS 17
+#define MMC_RISR_RXLENGTHERROR_LEN 1
+#define MMC_RISR_RXOUTOFRANGETYPE_POS 18
+#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1
+#define MMC_RISR_RXPAUSEFRAMES_POS 19
+#define MMC_RISR_RXPAUSEFRAMES_LEN 1
+#define MMC_RISR_RXFIFOOVERFLOW_POS 20
+#define MMC_RISR_RXFIFOOVERFLOW_LEN 1
+#define MMC_RISR_RXVLANFRAMES_GB_POS 21
+#define MMC_RISR_RXVLANFRAMES_GB_LEN 1
+#define MMC_RISR_RXWATCHDOGERROR_POS 22
+#define MMC_RISR_RXWATCHDOGERROR_LEN 1
+#define MMC_TIER_ALL_INTERRUPTS_POS 0
+#define MMC_TIER_ALL_INTERRUPTS_LEN 18
+#define MMC_TISR_TXOCTETCOUNT_GB_POS 0
+#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1
+#define MMC_TISR_TXFRAMECOUNT_GB_POS 1
+#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1
+#define MMC_TISR_TX64OCTETS_GB_POS 4
+#define MMC_TISR_TX64OCTETS_GB_LEN 1
+#define MMC_TISR_TX65TO127OCTETS_GB_POS 5
+#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1
+#define MMC_TISR_TX128TO255OCTETS_GB_POS 6
+#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1
+#define MMC_TISR_TX256TO511OCTETS_GB_POS 7
+#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXUNDERFLOWERROR_POS 13
+#define MMC_TISR_TXUNDERFLOWERROR_LEN 1
+#define MMC_TISR_TXOCTETCOUNT_G_POS 14
+#define MMC_TISR_TXOCTETCOUNT_G_LEN 1
+#define MMC_TISR_TXFRAMECOUNT_G_POS 15
+#define MMC_TISR_TXFRAMECOUNT_G_LEN 1
+#define MMC_TISR_TXPAUSEFRAMES_POS 16
+#define MMC_TISR_TXPAUSEFRAMES_LEN 1
+#define MMC_TISR_TXVLANFRAMES_G_POS 17
+#define MMC_TISR_TXVLANFRAMES_G_LEN 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDDR 0x1010
+#define MTL_RQDCM0R 0x1030
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_POS 5
+#define MTL_OMR_ETSALG_LEN 2
+#define MTL_OMR_RAA_POS 2
+#define MTL_OMR_RAA_LEN 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_POS 16
+#define MTL_Q_RQDR_PRXQ_LEN 14
+#define MTL_Q_RQDR_RXQSTS_POS 4
+#define MTL_Q_RQDR_RXQSTS_LEN 2
+#define MTL_Q_RQFCR_RFA_POS 1
+#define MTL_Q_RQFCR_RFA_LEN 6
+#define MTL_Q_RQFCR_RFD_POS 17
+#define MTL_Q_RQFCR_RFD_LEN 6
+#define MTL_Q_RQOMR_EHFC_POS 7
+#define MTL_Q_RQOMR_EHFC_LEN 1
+#define MTL_Q_RQOMR_RQS_POS 16
+#define MTL_Q_RQOMR_RQS_LEN 9
+#define MTL_Q_RQOMR_RSF_POS 5
+#define MTL_Q_RQOMR_RSF_LEN 1
+#define MTL_Q_RQOMR_FEP_POS 4
+#define MTL_Q_RQOMR_FEP_LEN 1
+#define MTL_Q_RQOMR_FUP_POS 3
+#define MTL_Q_RQOMR_FUP_LEN 1
+#define MTL_Q_RQOMR_RTC_POS 0
+#define MTL_Q_RQOMR_RTC_LEN 2
+#define MTL_Q_TQOMR_FTQ_POS 0
+#define MTL_Q_TQOMR_FTQ_LEN 1
+#define MTL_Q_TQOMR_Q2TCMAP_POS 8
+#define MTL_Q_TQOMR_Q2TCMAP_LEN 3
+#define MTL_Q_TQOMR_TQS_POS 16
+#define MTL_Q_TQOMR_TQS_LEN 10
+#define MTL_Q_TQOMR_TSF_POS 1
+#define MTL_Q_TQOMR_TSF_LEN 1
+#define MTL_Q_TQOMR_TTC_POS 4
+#define MTL_Q_TQOMR_TTC_LEN 3
+#define MTL_Q_TQOMR_TXQEN_POS 2
+#define MTL_Q_TQOMR_TXQEN_LEN 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+#define MTL_RQDCM0R_Q0MDMACH 0x0
+#define MTL_RQDCM0R_Q1MDMACH 0x00000100
+#define MTL_RQDCM0R_Q2MDMACH 0x00020000
+#define MTL_RQDCM0R_Q3MDMACH 0x03000000
+#define MTL_RQDCM1R_Q4MDMACH 0x00000004
+#define MTL_RQDCM1R_Q5MDMACH 0x00000500
+#define MTL_RQDCM1R_Q6MDMACH 0x00060000
+#define MTL_RQDCM1R_Q7MDMACH 0x07000000
+#define MTL_RQDCM2R_Q8MDMACH 0x00000008
+#define MTL_RQDCM2R_Q9MDMACH 0x00000900
+#define MTL_RQDCM2R_Q10MDMACH 0x000A0000
+#define MTL_RQDCM2R_Q11MDMACH 0x0B000000
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_POS 0
+#define MTL_TC_ETSCR_TSA_LEN 2
+#define MTL_TC_QWR_QW_POS 0
+#define MTL_TC_QWR_QW_LEN 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_POS 17
+#define DMA_ISR_MACIS_LEN 1
+#define DMA_ISR_MTLIS_POS 16
+#define DMA_ISR_MTLIS_LEN 1
+#define DMA_MR_SWR_POS 0
+#define DMA_MR_SWR_LEN 1
+#define DMA_SBMR_EAME_POS 11
+#define DMA_SBMR_EAME_LEN 1
+#define DMA_SBMR_BLEN_64_POS 5
+#define DMA_SBMR_BLEN_64_LEN 1
+#define DMA_SBMR_BLEN_128_POS 6
+#define DMA_SBMR_BLEN_128_LEN 1
+#define DMA_SBMR_BLEN_256_POS 7
+#define DMA_SBMR_BLEN_256_LEN 1
+#define DMA_SBMR_UNDEF_POS 0
+#define DMA_SBMR_UNDEF_LEN 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_LEN 4
+#define DMA_DSR_TPS_LEN 4
+#define DMA_DSR_Q_LEN (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_POS 16
+#define DMA_CH_CR_PBLX8_LEN 1
+#define DMA_CH_CR_SPH_POS 24
+#define DMA_CH_CR_SPH_LEN 1
+#define DMA_CH_IER_AIE_POS 15
+#define DMA_CH_IER_AIE_LEN 1
+#define DMA_CH_IER_FBEE_POS 12
+#define DMA_CH_IER_FBEE_LEN 1
+#define DMA_CH_IER_NIE_POS 16
+#define DMA_CH_IER_NIE_LEN 1
+#define DMA_CH_IER_RBUE_POS 7
+#define DMA_CH_IER_RBUE_LEN 1
+#define DMA_CH_IER_RIE_POS 6
+#define DMA_CH_IER_RIE_LEN 1
+#define DMA_CH_IER_RSE_POS 8
+#define DMA_CH_IER_RSE_LEN 1
+#define DMA_CH_IER_TBUE_POS 2
+#define DMA_CH_IER_TBUE_LEN 1
+#define DMA_CH_IER_TIE_POS 0
+#define DMA_CH_IER_TIE_LEN 1
+#define DMA_CH_IER_TXSE_POS 1
+#define DMA_CH_IER_TXSE_LEN 1
+#define DMA_CH_RCR_PBL_POS 16
+#define DMA_CH_RCR_PBL_LEN 6
+#define DMA_CH_RCR_RBSZ_POS 1
+#define DMA_CH_RCR_RBSZ_LEN 14
+#define DMA_CH_RCR_SR_POS 0
+#define DMA_CH_RCR_SR_LEN 1
+#define DMA_CH_RIWT_RWT_POS 0
+#define DMA_CH_RIWT_RWT_LEN 8
+#define DMA_CH_SR_FBE_POS 12
+#define DMA_CH_SR_FBE_LEN 1
+#define DMA_CH_SR_RBU_POS 7
+#define DMA_CH_SR_RBU_LEN 1
+#define DMA_CH_SR_RI_POS 6
+#define DMA_CH_SR_RI_LEN 1
+#define DMA_CH_SR_RPS_POS 8
+#define DMA_CH_SR_RPS_LEN 1
+#define DMA_CH_SR_TBU_POS 2
+#define DMA_CH_SR_TBU_LEN 1
+#define DMA_CH_SR_TI_POS 0
+#define DMA_CH_SR_TI_LEN 1
+#define DMA_CH_SR_TPS_POS 1
+#define DMA_CH_SR_TPS_LEN 1
+#define DMA_CH_TCR_OSP_POS 4
+#define DMA_CH_TCR_OSP_LEN 1
+#define DMA_CH_TCR_PBL_POS 16
+#define DMA_CH_TCR_PBL_LEN 6
+#define DMA_CH_TCR_ST_POS 0
+#define DMA_CH_TCR_ST_LEN 1
+#define DMA_CH_TCR_TSE_POS 12
+#define DMA_CH_TCR_TSE_LEN 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64
+#define DMA_PBL_128 128
+#define DMA_PBL_256 256
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_POS 2
+#define RX_PACKET_ERRORS_CRC_LEN 1
+#define RX_PACKET_ERRORS_FRAME_POS 3
+#define RX_PACKET_ERRORS_FRAME_LEN 1
+#define RX_PACKET_ERRORS_LENGTH_POS 0
+#define RX_PACKET_ERRORS_LENGTH_LEN 1
+#define RX_PACKET_ERRORS_OVERRUN_POS 1
+#define RX_PACKET_ERRORS_OVERRUN_LEN 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1
+
+#define RX_NORMAL_DESC0_OVT_POS 0
+#define RX_NORMAL_DESC0_OVT_LEN 16
+#define RX_NORMAL_DESC2_HL_POS 0
+#define RX_NORMAL_DESC2_HL_LEN 10
+#define RX_NORMAL_DESC3_CDA_POS 27
+#define RX_NORMAL_DESC3_CDA_LEN 1
+#define RX_NORMAL_DESC3_CTXT_POS 30
+#define RX_NORMAL_DESC3_CTXT_LEN 1
+#define RX_NORMAL_DESC3_ES_POS 15
+#define RX_NORMAL_DESC3_ES_LEN 1
+#define RX_NORMAL_DESC3_ETLT_POS 16
+#define RX_NORMAL_DESC3_ETLT_LEN 4
+#define RX_NORMAL_DESC3_FD_POS 29
+#define RX_NORMAL_DESC3_FD_LEN 1
+#define RX_NORMAL_DESC3_INTE_POS 30
+#define RX_NORMAL_DESC3_INTE_LEN 1
+#define RX_NORMAL_DESC3_L34T_POS 20
+#define RX_NORMAL_DESC3_L34T_LEN 4
+#define RX_NORMAL_DESC3_LD_POS 28
+#define RX_NORMAL_DESC3_LD_LEN 1
+#define RX_NORMAL_DESC3_OWN_POS 31
+#define RX_NORMAL_DESC3_OWN_LEN 1
+#define RX_NORMAL_DESC3_PL_POS 0
+#define RX_NORMAL_DESC3_PL_LEN 14
+#define RX_NORMAL_DESC3_RSV_POS 26
+#define RX_NORMAL_DESC3_RSV_LEN 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_POS 4
+#define RX_CONTEXT_DESC3_TSA_LEN 1
+#define RX_CONTEXT_DESC3_TSD_POS 6
+#define RX_CONTEXT_DESC3_TSD_LEN 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
+#define TX_PACKET_ATTRIBUTES_PTP_POS 3
+#define TX_PACKET_ATTRIBUTES_PTP_LEN 1
+
+#define TX_CONTEXT_DESC2_MSS_POS 0
+#define TX_CONTEXT_DESC2_MSS_LEN 15
+#define TX_CONTEXT_DESC3_CTXT_POS 30
+#define TX_CONTEXT_DESC3_CTXT_LEN 1
+#define TX_CONTEXT_DESC3_TCMSSV_POS 26
+#define TX_CONTEXT_DESC3_TCMSSV_LEN 1
+#define TX_CONTEXT_DESC3_VLTV_POS 16
+#define TX_CONTEXT_DESC3_VLTV_LEN 1
+#define TX_CONTEXT_DESC3_VT_POS 0
+#define TX_CONTEXT_DESC3_VT_LEN 16
+
+#define TX_NORMAL_DESC2_HL_B1L_POS 0
+#define TX_NORMAL_DESC2_HL_B1L_LEN 14
+#define TX_NORMAL_DESC2_IC_POS 31
+#define TX_NORMAL_DESC2_IC_LEN 1
+#define TX_NORMAL_DESC2_TTSE_POS 30
+#define TX_NORMAL_DESC2_TTSE_LEN 1
+#define TX_NORMAL_DESC2_VTIR_POS 14
+#define TX_NORMAL_DESC2_VTIR_LEN 2
+#define TX_NORMAL_DESC3_CIC_POS 16
+#define TX_NORMAL_DESC3_CIC_LEN 2
+#define TX_NORMAL_DESC3_CPC_POS 26
+#define TX_NORMAL_DESC3_CPC_LEN 2
+#define TX_NORMAL_DESC3_CTXT_POS 30
+#define TX_NORMAL_DESC3_CTXT_LEN 1
+#define TX_NORMAL_DESC3_FD_POS 29
+#define TX_NORMAL_DESC3_FD_LEN 1
+#define TX_NORMAL_DESC3_FL_POS 0
+#define TX_NORMAL_DESC3_FL_LEN 15
+#define TX_NORMAL_DESC3_LD_POS 28
+#define TX_NORMAL_DESC3_LD_LEN 1
+#define TX_NORMAL_DESC3_OWN_POS 31
+#define TX_NORMAL_DESC3_OWN_LEN 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4
+#define TX_NORMAL_DESC3_TCPPL_POS 0
+#define TX_NORMAL_DESC3_TCPPL_LEN 18
+#define TX_NORMAL_DESC3_TSE_POS 18
+#define TX_NORMAL_DESC3_TSE_LEN 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+#define XLGMAC_MTL_REG(pdata, n, reg) \
+ ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg))
+
+#define XLGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg))
+
+#endif /* __DWC_XLGMAC_REG_H__ */
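To see how the *_POS/*_LEN field definitions and the per-queue base/stride offsets above are meant to be combined, here is a minimal sketch (illustrative only, not taken from this patch): it assumes the XLGMAC_GET_REG_BITS/XLGMAC_SET_REG_BITS helpers from dwc-xlgmac.h below and plain readl()/writel() MMIO access, whereas the driver itself may use its own accessors.

#include <linux/io.h>
#include <linux/printk.h>

#include "dwc-xlgmac-reg.h"
#include "dwc-xlgmac.h"

/* Read the RX queue size (RQS) field of MTL queue 2's RQOMR register,
 * then write the same field back with a new (arbitrary) value, leaving
 * the other bits untouched. The register address works out to
 * mac_regs + 0x1100 + queue * 0x80 + MTL_Q_RQOMR (0x40).
 */
static void example_resize_rx_queue(struct xlgmac_pdata *pdata)
{
	unsigned int queue = 2;
	u32 regval;

	regval = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQOMR));

	/* RQS occupies bits [24:16] (POS 16, LEN 9) */
	pr_debug("queue %u RQS=%u\n", queue,
		 XLGMAC_GET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
				     MTL_Q_RQOMR_RQS_LEN));

	regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
				     MTL_Q_RQOMR_RQS_LEN, 0x7f);
	writel(regval, XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQOMR));
}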
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
new file mode 100644
index 000000000000..7a4dc643b2b9
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -0,0 +1,651 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_H__
+#define __DWC_XLGMAC_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/timecounter.h>
+
+#define XLGMAC_DRV_NAME "dwc-xlgmac"
+#define XLGMAC_DRV_VERSION "1.0.0"
+#define XLGMAC_DRV_DESC "Synopsys DWC XLGMAC Driver"
+
+/* Descriptor related parameters */
+#define XLGMAC_TX_DESC_CNT 1024
+#define XLGMAC_TX_DESC_MIN_FREE (XLGMAC_TX_DESC_CNT >> 3)
+#define XLGMAC_TX_DESC_MAX_PROC (XLGMAC_TX_DESC_CNT >> 1)
+#define XLGMAC_RX_DESC_CNT 1024
+#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for a SKB */
+#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
+
+#define XLGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define XLGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XLGMAC_RX_BUF_ALIGN 64
+
+/* Maximum Size for Splitting the Header Data
+ * Keep in sync with XLGMAC_SKB_ALLOC_SIZE
+ * 3'b000: 64 bytes, 3'b001: 128 bytes
+ * 3'b010: 256 bytes, 3'b011: 512 bytes
+ * 3'b100: 1023 bytes, 3'b101-3'b111: Reserved
+ */
+#define XLGMAC_SPH_HDSMS_SIZE 3
+#define XLGMAC_SKB_ALLOC_SIZE 512
+
+#define XLGMAC_MAX_FIFO 81920
+
+#define XLGMAC_MAX_DMA_CHANNELS 16
+#define XLGMAC_DMA_STOP_TIMEOUT 5
+#define XLGMAC_DMA_INTERRUPT_MASK 0x31c7
+
+/* Default coalescing parameters */
+#define XLGMAC_INIT_DMA_TX_USECS 1000
+#define XLGMAC_INIT_DMA_TX_FRAMES 25
+#define XLGMAC_INIT_DMA_RX_USECS 30
+#define XLGMAC_INIT_DMA_RX_FRAMES 25
+
+/* Flow control queue count */
+#define XLGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* System clock is 125 MHz */
+#define XLGMAC_SYSCLOCK 125000000
+
+/* Maximum MAC address hash table size (256 bits = 8 x 32-bit registers) */
+#define XLGMAC_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define XLGMAC_RSS_HASH_KEY_SIZE 40
+#define XLGMAC_RSS_MAX_TABLE_SIZE 256
+#define XLGMAC_RSS_LOOKUP_TABLE_TYPE 0
+#define XLGMAC_RSS_HASH_KEY_TYPE 1
+
+#define XLGMAC_STD_PACKET_MTU 1500
+#define XLGMAC_JUMBO_PACKET_MTU 9000
+
+/* Helper macro for descriptor handling
+ * Always use XLGMAC_GET_DESC_DATA to access the descriptor data
+ */
+#define XLGMAC_GET_DESC_DATA(ring, idx) ({ \
+ typeof(ring) _ring = (ring); \
+ ((_ring)->desc_data_head + \
+ ((idx) & ((_ring)->dma_desc_count - 1))); \
+})
+
+#define XLGMAC_GET_REG_BITS(var, pos, len) ({ \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define XLGMAC_GET_REG_BITS_LE(var, pos, len) ({ \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(var) _var = le32_to_cpu((var)); \
+ ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define XLGMAC_SET_REG_BITS(var, pos, len, val) ({ \
+ typeof(var) _var = (var); \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(val) _val = (val); \
+ _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+ _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+})
+
+#define XLGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \
+ typeof(var) _var = (var); \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(val) _val = (val); \
+ _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+ _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+ cpu_to_le32(_var); \
+})
+
+struct xlgmac_pdata;
+
+enum xlgmac_int {
+ XLGMAC_INT_DMA_CH_SR_TI,
+ XLGMAC_INT_DMA_CH_SR_TPS,
+ XLGMAC_INT_DMA_CH_SR_TBU,
+ XLGMAC_INT_DMA_CH_SR_RI,
+ XLGMAC_INT_DMA_CH_SR_RBU,
+ XLGMAC_INT_DMA_CH_SR_RPS,
+ XLGMAC_INT_DMA_CH_SR_TI_RI,
+ XLGMAC_INT_DMA_CH_SR_FBE,
+ XLGMAC_INT_DMA_ALL,
+};
+
+struct xlgmac_stats {
+ /* MMC TX counters */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* MMC RX counters */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+
+ /* Extra counters */
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+ u64 rx_buffer_unavailable;
+};
+
+struct xlgmac_ring_buf {
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ unsigned int skb_len;
+};
+
+/* Common Tx and Rx DMA hardware descriptor */
+struct xlgmac_dma_desc {
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xlgmac_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xlgmac_buffer_data {
+ struct xlgmac_page_alloc pa;
+ struct xlgmac_page_alloc pa_unmap;
+
+ dma_addr_t dma_base;
+ unsigned long dma_off;
+ unsigned int dma_len;
+};
+
+/* Tx-related desc data */
+struct xlgmac_tx_desc_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related desc data */
+struct xlgmac_rx_desc_data {
+ struct xlgmac_buffer_data hdr; /* Header locations */
+ struct xlgmac_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
+};
+
+struct xlgmac_pkt_info {
+ struct sk_buff *skb;
+
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ /* descriptors needed for this packet */
+ unsigned int desc_count;
+ unsigned int length;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+};
+
+struct xlgmac_desc_data {
+ /* dma_desc: Virtual address of descriptor
+ * dma_desc_addr: DMA address of descriptor
+ */
+ struct xlgmac_dma_desc *dma_desc;
+ dma_addr_t dma_desc_addr;
+
+ /* skb: Virtual address of SKB
+ * skb_dma: DMA address of SKB data
+ * skb_dma_len: Length of SKB DMA area
+ */
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ unsigned int skb_dma_len;
+
+ /* Tx/Rx-related data */
+ struct xlgmac_tx_desc_data tx;
+ struct xlgmac_rx_desc_data rx;
+
+ unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+ * context descriptor) has not been DMA'd yet, the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
+};
+
+struct xlgmac_ring {
+ /* Per packet related information */
+ struct xlgmac_pkt_info pkt_info;
+
+ /* Virtual/DMA addresses of DMA descriptor list and the total count */
+ struct xlgmac_dma_desc *dma_desc_head;
+ dma_addr_t dma_desc_head_addr;
+ unsigned int dma_desc_count;
+
+ /* Array of descriptor data corresponding to the DMA descriptors
+ * (always use the XLGMAC_GET_DESC_DATA macro to access this data)
+ */
+ struct xlgmac_desc_data *desc_data_head;
+
+ /* Page allocation for RX buffers */
+ struct xlgmac_page_alloc rx_hdr_pa;
+ struct xlgmac_page_alloc rx_buf_pa;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int xmit_more;
+ unsigned int queue_stopped;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+ };
+} ____cacheline_aligned;
+
+struct xlgmac_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xlgmac_pdata *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ /* Per-channel IRQ number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct timer_list tx_timer;
+
+ struct xlgmac_ring *tx_ring;
+ struct xlgmac_ring *rx_ring;
+} ____cacheline_aligned;
+
+struct xlgmac_desc_ops {
+ int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata);
+ void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
+ int (*map_tx_skb)(struct xlgmac_channel *channel,
+ struct sk_buff *skb);
+ int (*map_rx_buffer)(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct xlgmac_desc_data *desc_data);
+ void (*unmap_desc_data)(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data);
+ void (*tx_desc_init)(struct xlgmac_pdata *pdata);
+ void (*rx_desc_init)(struct xlgmac_pdata *pdata);
+};
+
+struct xlgmac_hw_ops {
+ int (*init)(struct xlgmac_pdata *pdata);
+ int (*exit)(struct xlgmac_pdata *pdata);
+
+ int (*tx_complete)(struct xlgmac_dma_desc *dma_desc);
+
+ void (*enable_tx)(struct xlgmac_pdata *pdata);
+ void (*disable_tx)(struct xlgmac_pdata *pdata);
+ void (*enable_rx)(struct xlgmac_pdata *pdata);
+ void (*disable_rx)(struct xlgmac_pdata *pdata);
+
+ int (*enable_int)(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id);
+ int (*disable_int)(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id);
+ void (*dev_xmit)(struct xlgmac_channel *channel);
+ int (*dev_read)(struct xlgmac_channel *channel);
+
+ int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+ int (*config_rx_mode)(struct xlgmac_pdata *pdata);
+ int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
+
+ /* For MII speed configuration */
+ int (*set_xlgmii_25000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_40000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_50000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_100000_speed)(struct xlgmac_pdata *pdata);
+
+ /* For descriptor related operation */
+ void (*tx_desc_init)(struct xlgmac_channel *channel);
+ void (*rx_desc_init)(struct xlgmac_channel *channel);
+ void (*tx_desc_reset)(struct xlgmac_desc_data *desc_data);
+ void (*rx_desc_reset)(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int index);
+ int (*is_last_desc)(struct xlgmac_dma_desc *dma_desc);
+ int (*is_context_desc)(struct xlgmac_dma_desc *dma_desc);
+ void (*tx_start_xmit)(struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring);
+
+ /* For Flow Control */
+ int (*config_tx_flow_control)(struct xlgmac_pdata *pdata);
+ int (*config_rx_flow_control)(struct xlgmac_pdata *pdata);
+
+ /* For VLAN-related config */
+ int (*enable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+ int (*enable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+ int (*update_vlan_hash_table)(struct xlgmac_pdata *pdata);
+
+ /* For RX and TX coalescing */
+ int (*config_rx_coalesce)(struct xlgmac_pdata *pdata);
+ int (*config_tx_coalesce)(struct xlgmac_pdata *pdata);
+ unsigned int (*usec_to_riwt)(struct xlgmac_pdata *pdata,
+ unsigned int usec);
+ unsigned int (*riwt_to_usec)(struct xlgmac_pdata *pdata,
+ unsigned int riwt);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+ int (*config_tx_threshold)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+ int (*config_tsf_mode)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xlgmac_pdata *pdata);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*get_rx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*config_tx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*get_tx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*config_pblx8)(struct xlgmac_pdata *pdata);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xlgmac_pdata *pdata);
+ void (*tx_mmc_int)(struct xlgmac_pdata *pdata);
+ void (*read_mmc_stats)(struct xlgmac_pdata *pdata);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xlgmac_pdata *pdata);
+ int (*disable_rss)(struct xlgmac_pdata *pdata);
+ int (*set_rss_hash_key)(struct xlgmac_pdata *pdata,
+ const u8 *key);
+ int (*set_rss_lookup_table)(struct xlgmac_pdata *pdata,
+ const u32 *table);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xlgmac_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int phyifsel; /* PHY interface support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA (MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xlgmac_resources {
+ void __iomem *addr;
+ int irq;
+};
+
+struct xlgmac_pdata {
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct xlgmac_hw_ops hw_ops;
+ struct xlgmac_desc_ops desc_ops;
+
+ /* Device statistics */
+ struct xlgmac_stats stats;
+
+ u32 msg_enable;
+
+ /* MAC registers base */
+ void __iomem *mac_regs;
+
+ /* Hardware features of the device */
+ struct xlgmac_hw_features hw_feat;
+
+ struct work_struct restart_work;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xlgmac_channel *channel_head;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_desc_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_usecs;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* Device interrupt number */
+ int dev_irq;
+ unsigned int per_channel_irq;
+ int channel_irq[XLGMAC_MAX_DMA_CHANNELS];
+
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XLGMAC_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XLGMAC_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
+ int phy_speed;
+
+ char drv_name[32];
+ char drv_ver[32];
+};
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops);
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops);
+const struct net_device_ops *xlgmac_get_netdev_ops(void);
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx,
+ unsigned int count,
+ unsigned int flag);
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx);
+void xlgmac_print_pkt(struct net_device *netdev,
+ struct sk_buff *skb, bool tx_rx);
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata);
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata);
+int xlgmac_drv_probe(struct device *dev,
+ struct xlgmac_resources *res);
+int xlgmac_drv_remove(struct device *dev);
+
+/* For debug prints */
+#ifdef XLGMAC_DEBUG
+#define XLGMAC_PR(fmt, args...) \
+ pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args)
+#else
+#define XLGMAC_PR(x...) do { } while (0)
+#endif
+
+#endif /* __DWC_XLGMAC_H__ */
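For the rings declared here, a minimal sketch of how the free-running cur/dirty indices and XLGMAC_GET_DESC_DATA are intended to cooperate on a Tx ring (the function name and the elided cleanup steps are illustrative, not part of this patch; dma_desc_count must be a power of two for the macro's index masking to work):

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static void example_reclaim_tx_descriptors(struct xlgmac_ring *ring)
{
	struct xlgmac_desc_data *desc_data;

	while (ring->dirty != ring->cur) {
		/* idx & (dma_desc_count - 1) wraps the index into the array */
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Stop at the first descriptor the hardware still owns */
		if (le32_to_cpu(desc_data->dma_desc->desc3) &
		    BIT(TX_NORMAL_DESC3_OWN_POS))
			break;

		/* ... unmap skb_dma, credit BQL with tx.packets/tx.bytes ... */
		ring->dirty++;
	}
}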
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 72013314bba8..fa6a06571187 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1206,61 +1206,68 @@ void gelic_net_get_drvinfo(struct net_device *netdev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int gelic_ether_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int gelic_ether_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct gelic_card *card = netdev_card(netdev);
+ u32 supported, advertising;
gelic_card_get_ether_port_status(card, 0);
if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
case GELIC_LV1_ETHER_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
case GELIC_LV1_ETHER_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case GELIC_LV1_ETHER_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
default:
pr_info("%s: speed unknown\n", __func__);
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
}
- cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
+ supported = SUPPORTED_TP | SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full;
- cmd->advertising = cmd->supported;
+ advertising = supported;
if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->advertising &= ~ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ advertising &= ~ADVERTISED_Autoneg;
}
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int gelic_ether_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int
+gelic_ether_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct gelic_card *card = netdev_card(netdev);
u64 mode;
int ret;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
mode = GELIC_LV1_ETHER_AUTO_NEG;
} else {
- switch (cmd->speed) {
+ switch (cmd->base.speed) {
case SPEED_10:
mode = GELIC_LV1_ETHER_SPEED_10;
break;
@@ -1273,9 +1280,9 @@ static int gelic_ether_set_settings(struct net_device *netdev,
default:
return -EINVAL;
}
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL) {
mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
- else if (cmd->speed == SPEED_1000) {
+ } else if (cmd->base.speed == SPEED_1000) {
pr_info("1000 half duplex is not supported.\n");
return -EINVAL;
}
@@ -1370,11 +1377,11 @@ done:
static const struct ethtool_ops gelic_ether_ethtool_ops = {
.get_drvinfo = gelic_net_get_drvinfo,
- .get_settings = gelic_ether_get_settings,
- .set_settings = gelic_ether_set_settings,
.get_link = ethtool_op_get_link,
.get_wol = gelic_net_get_wol,
.set_wol = gelic_net_set_wol,
+ .get_link_ksettings = gelic_ether_get_link_ksettings,
+ .set_link_ksettings = gelic_ether_set_link_ksettings,
};
/**
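The link_ksettings conversions in this driver and the ones below share one shape: build the legacy SUPPORTED_*/ADVERTISED_* u32 bitmaps locally, fill cmd->base, then widen the bitmaps with ethtool_convert_legacy_u32_to_link_mode(). A minimal, driver-agnostic sketch of that pattern (the link modes and speed here are placeholders, not taken from any of these drivers):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_TP | SUPPORTED_Autoneg |
			SUPPORTED_1000baseT_Full;
	u32 advertising = supported;

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* The legacy u32 masks become entries in the link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

Drivers that know their modes statically (spider_net and fjes below) can instead start from ethtool_link_ksettings_zero_link_mode() and add modes one by one, as those hunks show.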
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index ffe519382e11..16bd036d0682 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -47,19 +47,23 @@ static struct {
};
static int
-spider_net_ethtool_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct spider_net_card *card;
card = netdev_priv(netdev);
- cmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- cmd->port = PORT_FIBRE;
- ethtool_cmd_speed_set(cmd, card->phy.speed);
- cmd->duplex = DUPLEX_FULL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.speed = card->phy.speed;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -166,7 +170,6 @@ static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
}
const struct ethtool_ops spider_net_ethtool_ops = {
- .get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo,
.get_wol = spider_net_ethtool_get_wol,
.get_msglevel = spider_net_ethtool_get_msglevel,
@@ -177,5 +180,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
.get_strings = spider_net_get_strings,
.get_sset_count = spider_net_get_sset_count,
.get_ethtool_stats = spider_net_get_ethtool_stats,
+ .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c5583991da4a..5ac6eaa9e785 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1499,27 +1499,29 @@ static void tsi108_init_mac(struct net_device *dev)
TSI_WRITE(TSI108_EC_INTMASK, ~0);
}
-static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_gset(&data->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
-static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_sset(&data->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
@@ -1535,8 +1537,8 @@ static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static const struct ethtool_ops tsi108_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = tsi108_get_settings,
- .set_settings = tsi108_set_settings,
+ .get_link_ksettings = tsi108_get_link_ksettings,
+ .set_link_ksettings = tsi108_set_link_ksettings,
};
static const struct net_device_ops tsi108_netdev_ops = {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c068c58428f7..4cf41f779d0e 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2303,25 +2303,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_gset(&rp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
mutex_unlock(&rp->task_lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_sset(&rp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
rhine_set_carrier(&rp->mii_if);
mutex_unlock(&rp->task_lock);
@@ -2391,14 +2393,14 @@ static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
.get_wol = rhine_get_wol,
.set_wol = rhine_set_wol,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d088788b27a7..ef9538ee53d0 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3291,15 +3291,17 @@ static void velocity_ethtool_down(struct net_device *dev)
velocity_set_power_state(vptr, PCI_D3hot);
}
-static int velocity_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int velocity_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem *regs = vptr->mac_regs;
u32 status;
+ u32 supported, advertising;
+
status = check_connection_type(vptr->mac_regs);
- cmd->supported = SUPPORTED_TP |
+ supported = SUPPORTED_TP |
SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@@ -3308,9 +3310,9 @@ static int velocity_get_settings(struct net_device *dev,
SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+ advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
- cmd->advertising |=
+ advertising |=
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -3320,19 +3322,19 @@ static int velocity_get_settings(struct net_device *dev,
} else {
switch (vptr->options.spd_dpx) {
case SPD_DPX_1000_FULL:
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
break;
case SPD_DPX_100_HALF:
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
break;
case SPD_DPX_100_FULL:
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
break;
case SPD_DPX_10_HALF:
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
break;
case SPD_DPX_10_FULL:
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
break;
default:
break;
@@ -3340,30 +3342,35 @@ static int velocity_get_settings(struct net_device *dev,
}
if (status & VELOCITY_SPEED_1000)
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
else if (status & VELOCITY_SPEED_100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
- cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- cmd->port = PORT_TP;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
+ cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
if (status & VELOCITY_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int velocity_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int velocity_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
u32 curr_status;
u32 new_status = 0;
int ret = 0;
@@ -3371,11 +3378,12 @@ static int velocity_set_settings(struct net_device *dev,
curr_status = check_connection_type(vptr->mac_regs);
curr_status &= (~VELOCITY_LINK_FAIL);
- new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+ new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
- new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
+ new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
+ VELOCITY_DUPLEX_FULL : 0);
if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
(new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
@@ -3644,8 +3652,6 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops velocity_ethtool_ops = {
- .get_settings = velocity_get_settings,
- .set_settings = velocity_set_settings,
.get_drvinfo = velocity_get_drvinfo,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
@@ -3658,7 +3664,9 @@ static const struct ethtool_ops velocity_ethtool_ops = {
.get_coalesce = velocity_get_coalesce,
.set_coalesce = velocity_set_coalesce,
.begin = velocity_ethtool_up,
- .complete = velocity_ethtool_down
+ .complete = velocity_ethtool_down,
+ .get_link_ksettings = velocity_get_link_ksettings,
+ .set_link_ksettings = velocity_set_link_ksettings,
};
#if defined(CONFIG_PM) && defined(CONFIG_INET)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b96e96919e31..33c595f4691d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -301,7 +301,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
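The xilinx_axienet change above swaps eth_random_addr(ndev->dev_addr) for eth_hw_addr_random(ndev). Besides filling in a random, locally administered unicast address, the latter also records how the address was assigned; roughly (paraphrasing include/linux/etherdevice.h from this period, shown only for context):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static inline void eth_hw_addr_random_sketch(struct net_device *dev)
{
	dev->addr_assign_type = NET_ADDR_RANDOM; /* reported via sysfs addr_assign_type */
	eth_random_addr(dev->dev_addr);          /* random, locally administered, unicast */
}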
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 6575f880f1be..7d101714c2ef 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -175,16 +175,15 @@ static void fjes_get_drvinfo(struct net_device *netdev,
"platform:%s", plat_dev->name);
}
-static int fjes_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int fjes_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
- ecmd->supported = 0;
- ecmd->advertising = 0;
- ecmd->duplex = DUPLEX_FULL;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = PORT_NONE;
- ethtool_cmd_speed_set(ecmd, 20000); /* 20Gb/s */
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ecmd->base.duplex = DUPLEX_FULL;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ ecmd->base.port = PORT_NONE;
+ ecmd->base.speed = 20000; /* 20Gb/s */
return 0;
}
@@ -296,7 +295,6 @@ static int fjes_get_dump_data(struct net_device *netdev,
}
static const struct ethtool_ops fjes_ethtool_ops = {
- .get_settings = fjes_get_settings,
.get_drvinfo = fjes_get_drvinfo,
.get_ethtool_stats = fjes_get_ethtool_stats,
.get_strings = fjes_get_strings,
@@ -306,6 +304,7 @@ static const struct ethtool_ops fjes_ethtool_ops = {
.set_dump = fjes_set_dump,
.get_dump_flag = fjes_get_dump_flag,
.get_dump_data = fjes_get_dump_data,
+ .get_link_ksettings = fjes_get_link_ksettings,
};
void fjes_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89698741682f..3e1854f34420 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -58,6 +58,9 @@ struct pdp_ctx {
struct in_addr ms_addr_ip4;
struct in_addr sgsn_addr_ip4;
+ struct sock *sk;
+ struct net_device *dev;
+
atomic_t tx_seq;
struct rcu_head rcu_head;
};
@@ -66,8 +69,8 @@ struct pdp_ctx {
struct gtp_dev {
struct list_head list;
- struct socket *sock0;
- struct socket *sock1u;
+ struct sock *sk0;
+ struct sock *sk1u;
struct net_device *dev;
@@ -84,6 +87,8 @@ struct gtp_net {
static u32 gtp_h_initval;
+static void pdp_context_delete(struct pdp_ctx *pctx);
+
static inline u32 gtp0_hashfn(u64 tid)
{
u32 *tid32 = (u32 *) &tid;
@@ -175,9 +180,42 @@ static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
return false;
}
+static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, unsigned int hdrlen)
+{
+ struct pcpu_sw_netstats *stats;
+
+ if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+ netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
+ return 1;
+ }
+
+ /* Get rid of the GTP + UDP headers. */
+ if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+ !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
+ return -1;
+
+ netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
+
+ /* Now that the UDP and the GTP header have been removed, set up the
+ * new network header. This is required by the upper layer to
+ * calculate the transport header.
+ */
+ skb_reset_network_header(skb);
+
+ skb->dev = pctx->dev;
+
+ stats = this_cpu_ptr(pctx->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+ return 0;
+}
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
-static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
- bool xnet)
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp0_header);
@@ -201,17 +239,10 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
return 1;
}
- if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
- netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- return 1;
- }
-
- /* Get rid of the GTP + UDP headers. */
- return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+ return gtp_rx(pctx, skb, hdrlen);
}
-static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
- bool xnet)
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
@@ -250,37 +281,33 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
return 1;
}
- if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
- netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- return 1;
- }
-
- /* Get rid of the GTP + UDP headers. */
- return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+ return gtp_rx(pctx, skb, hdrlen);
}
-static void gtp_encap_disable(struct gtp_dev *gtp)
+static void gtp_encap_destroy(struct sock *sk)
{
- if (gtp->sock0 && gtp->sock0->sk) {
- udp_sk(gtp->sock0->sk)->encap_type = 0;
- rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
- }
- if (gtp->sock1u && gtp->sock1u->sk) {
- udp_sk(gtp->sock1u->sk)->encap_type = 0;
- rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
- }
+ struct gtp_dev *gtp;
- gtp->sock0 = NULL;
- gtp->sock1u = NULL;
+ gtp = rcu_dereference_sk_user_data(sk);
+ if (gtp) {
+ udp_sk(sk)->encap_type = 0;
+ rcu_assign_sk_user_data(sk, NULL);
+ sock_put(sk);
+ }
}
-static void gtp_encap_destroy(struct sock *sk)
+static void gtp_encap_disable_sock(struct sock *sk)
{
- struct gtp_dev *gtp;
+ if (!sk)
+ return;
- gtp = rcu_dereference_sk_user_data(sk);
- if (gtp)
- gtp_encap_disable(gtp);
+ gtp_encap_destroy(sk);
+}
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+ gtp_encap_disable_sock(gtp->sk0);
+ gtp_encap_disable_sock(gtp->sk1u);
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -288,10 +315,8 @@ static void gtp_encap_destroy(struct sock *sk)
*/
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
- struct pcpu_sw_netstats *stats;
struct gtp_dev *gtp;
- bool xnet;
- int ret;
+ int ret = 0;
gtp = rcu_dereference_sk_user_data(sk);
if (!gtp)
@@ -299,16 +324,14 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
-
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
netdev_dbg(gtp->dev, "received GTP0 packet\n");
- ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+ ret = gtp0_udp_encap_recv(gtp, skb);
break;
case UDP_ENCAP_GTP1U:
netdev_dbg(gtp->dev, "received GTP1U packet\n");
- ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+ ret = gtp1u_udp_encap_recv(gtp, skb);
break;
default:
ret = -1; /* Shouldn't happen. */
@@ -317,33 +340,17 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
switch (ret) {
case 1:
netdev_dbg(gtp->dev, "pass up to the process\n");
- return 1;
+ break;
case 0:
- netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
break;
case -1:
netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
kfree_skb(skb);
- return 0;
+ ret = 0;
+ break;
}
- /* Now that the UDP and the GTP header have been removed, set up the
- * new network header. This is required by the upper layer to
- * calculate the transport header.
- */
- skb_reset_network_header(skb);
-
- skb->dev = gtp->dev;
-
- stats = this_cpu_ptr(gtp->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- netif_rx(skb);
-
- return 0;
+ return ret;
}
static int gtp_dev_init(struct net_device *dev)
@@ -367,8 +374,9 @@ static void gtp_dev_uninit(struct net_device *dev)
free_percpu(dev->tstats);
}
-static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
- const struct sock *sk, __be32 daddr)
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+ const struct sock *sk,
+ __be32 daddr)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = sk->sk_bound_dev_if;
@@ -377,7 +385,7 @@ static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
fl4->flowi4_tos = RT_CONN_FLAGS(sk);
fl4->flowi4_proto = sk->sk_protocol;
- return ip_route_output_key(net, fl4);
+ return ip_route_output_key(sock_net(sk), fl4);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -466,7 +474,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
struct flowi4 fl4;
struct iphdr *iph;
- struct sock *sk;
__be16 df;
int mtu;
@@ -482,30 +489,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
}
netdev_dbg(dev, "found PDP context %p\n", pctx);
- switch (pctx->gtp_version) {
- case GTP_V0:
- if (gtp->sock0)
- sk = gtp->sock0->sk;
- else
- sk = NULL;
- break;
- case GTP_V1:
- if (gtp->sock1u)
- sk = gtp->sock1u->sk;
- else
- sk = NULL;
- break;
- default:
- return -ENOENT;
- }
-
- if (!sk) {
- netdev_dbg(dev, "no userspace socket is available, skip\n");
- return -ENOENT;
- }
-
- rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
- pctx->sgsn_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->sgsn_addr_ip4.s_addr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
&pctx->sgsn_addr_ip4.s_addr);
@@ -550,7 +534,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
goto err_rt;
}
- gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+ gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
gtp_push_header(skb, pktinfo);
return 0;
@@ -640,27 +624,23 @@ static void gtp_link_setup(struct net_device *dev)
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
-static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1);
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- int hashsize, err, fd0, fd1;
struct gtp_dev *gtp;
struct gtp_net *gn;
+ int hashsize, err;
- if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
return -EINVAL;
gtp = netdev_priv(dev);
- fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
- fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
-
- err = gtp_encap_enable(dev, gtp, fd0, fd1);
+ err = gtp_encap_enable(gtp, data);
if (err < 0)
- goto out_err;
+ return err;
if (!data[IFLA_GTP_PDP_HASHSIZE])
hashsize = 1024;
@@ -688,7 +668,6 @@ out_hashtable:
gtp_hashtable_free(gtp);
out_encap:
gtp_encap_disable(gtp);
-out_err:
return err;
}
@@ -747,21 +726,6 @@ static struct rtnl_link_ops gtp_link_ops __read_mostly = {
.fill_info = gtp_fill_info,
};
-static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
-{
- struct net *net;
-
- /* Examine the link attributes and figure out which network namespace
- * we are talking about.
- */
- if (tb[GTPA_NET_NS_FD])
- net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
- else
- net = get_net(src_net);
-
- return net;
-}
-
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
int i;
@@ -791,85 +755,111 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
struct pdp_ctx *pctx;
int i;
- for (i = 0; i < gtp->hash_size; i++) {
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
- hlist_del_rcu(&pctx->hlist_tid);
- hlist_del_rcu(&pctx->hlist_addr);
- kfree_rcu(pctx, rcu_head);
- }
- }
+ for (i = 0; i < gtp->hash_size; i++)
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
+
synchronize_rcu();
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}
-static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1)
+static struct sock *gtp_encap_enable_socket(int fd, int type,
+ struct gtp_dev *gtp)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
- struct socket *sock0, *sock1u;
+ struct socket *sock;
+ struct sock *sk;
int err;
- netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
+ pr_debug("enable gtp on %d, %d\n", fd, type);
- sock0 = sockfd_lookup(fd_gtp0, &err);
- if (sock0 == NULL) {
- netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
- return -ENOENT;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ pr_debug("gtp socket fd=%d not found\n", fd);
+ return NULL;
}
- if (sock0->sk->sk_protocol != IPPROTO_UDP) {
- netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
- err = -EINVAL;
- goto err1;
- }
-
- sock1u = sockfd_lookup(fd_gtp1, &err);
- if (sock1u == NULL) {
- netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
- err = -ENOENT;
- goto err1;
+ if (sock->sk->sk_protocol != IPPROTO_UDP) {
+ pr_debug("socket fd=%d not UDP\n", fd);
+ sk = ERR_PTR(-EINVAL);
+ goto out_sock;
}
- if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
- netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
- err = -EINVAL;
- goto err2;
+ if (rcu_dereference_sk_user_data(sock->sk)) {
+ sk = ERR_PTR(-EBUSY);
+ goto out_sock;
}
- netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
-
- gtp->sock0 = sock0;
- gtp->sock1u = sock1u;
+ sk = sock->sk;
+ sock_hold(sk);
tuncfg.sk_user_data = gtp;
+ tuncfg.encap_type = type;
tuncfg.encap_rcv = gtp_encap_recv;
tuncfg.encap_destroy = gtp_encap_destroy;
- tuncfg.encap_type = UDP_ENCAP_GTP0;
- setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
-
- tuncfg.encap_type = UDP_ENCAP_GTP1U;
- setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
- err = 0;
-err2:
- sockfd_put(sock1u);
-err1:
- sockfd_put(sock0);
- return err;
+out_sock:
+ sockfd_put(sock);
+ return sk;
}
-static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct sock *sk1u = NULL;
+ struct sock *sk0 = NULL;
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
- if (ifindex == gtp->dev->ifindex)
- return gtp->dev;
+ if (data[IFLA_GTP_FD0]) {
+ u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+
+ sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
+ if (IS_ERR(sk0))
+ return PTR_ERR(sk0);
}
- return NULL;
+
+ if (data[IFLA_GTP_FD1]) {
+ u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+ sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
+ if (IS_ERR(sk1u)) {
+ if (sk0)
+ gtp_encap_disable_sock(sk0);
+ return PTR_ERR(sk1u);
+ }
+ }
+
+ gtp->sk0 = sk0;
+ gtp->sk1u = sk1u;
+
+ return 0;
+}
+
+static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
+{
+ struct gtp_dev *gtp = NULL;
+ struct net_device *dev;
+ struct net *net;
+
+ /* Examine the link attributes and figure out which network namespace
+ * we are talking about.
+ */
+ if (nla[GTPA_NET_NS_FD])
+ net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
+ else
+ net = get_net(src_net);
+
+ if (IS_ERR(net))
+ return NULL;
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
+ if (dev && dev->netdev_ops == &gtp_netdev_ops)
+ gtp = netdev_priv(dev);
+
+ put_net(net);
+ return gtp;
}
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
@@ -899,9 +889,10 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
- struct gtp_dev *gtp = netdev_priv(dev);
+ struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
struct pdp_ctx *pctx;
bool found = false;
@@ -940,6 +931,9 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
if (pctx == NULL)
return -ENOMEM;
+ sock_hold(sk);
+ pctx->sk = sk;
+ pctx->dev = gtp->dev;
ipv4_pdp_fill(pctx, info);
atomic_set(&pctx->tx_seq, 0);
@@ -976,10 +970,27 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
return 0;
}
+static void pdp_context_free(struct rcu_head *head)
+{
+ struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
+
+ sock_put(pctx->sk);
+ kfree(pctx);
+}
+
+static void pdp_context_delete(struct pdp_ctx *pctx)
+{
+ hlist_del_rcu(&pctx->hlist_tid);
+ hlist_del_rcu(&pctx->hlist_addr);
+ call_rcu(&pctx->rcu_head, pdp_context_free);
+}
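
The pdp_context_delete()/pdp_context_free() pair above is the usual RCU deferred-free idiom: unlink with hlist_del_rcu(), then free only after a grace period via call_rcu(). A minimal sketch of the reader side this protects, modelled loosely on the existing gtp0_pdp_find() lookup (illustrative only, not part of the patch):

/* Sketch only: a lookup that stays safe against pdp_context_delete()
 * because readers hold rcu_read_lock() while walking the hash chain.
 */
static struct pdp_ctx *sketch_pdp_lookup_v0(struct gtp_dev *gtp, u64 tid)
{
        struct hlist_head *head;
        struct pdp_ctx *pctx;

        /* bucket selection is assumed; the driver hashes on the tid */
        head = &gtp->tid_hash[hash_64(tid, 32) % gtp->hash_size];

        hlist_for_each_entry_rcu(pctx, head, hlist_tid) {
                if (pctx->gtp_version == GTP_V0 && pctx->u.v0.tid == tid)
                        return pctx;    /* valid until rcu_read_unlock() */
        }
        return NULL;
}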
+
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
- struct net_device *dev;
- struct net *net;
+ unsigned int version;
+ struct gtp_dev *gtp;
+ struct sock *sk;
+ int err;
if (!info->attrs[GTPA_VERSION] ||
!info->attrs[GTPA_LINK] ||
@@ -987,7 +998,9 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
!info->attrs[GTPA_MS_ADDRESS])
return -EINVAL;
- switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
+
+ switch (version) {
case GTP_V0:
if (!info->attrs[GTPA_TID] ||
!info->attrs[GTPA_FLOW])
@@ -1003,77 +1016,101 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
- net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
- if (IS_ERR(net))
- return PTR_ERR(net);
+ rcu_read_lock();
- /* Check if there's an existing gtpX device to configure */
- dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
- if (dev == NULL) {
- put_net(net);
- return -ENODEV;
+ gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+ if (!gtp) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (version == GTP_V0)
+ sk = gtp->sk0;
+ else if (version == GTP_V1)
+ sk = gtp->sk1u;
+ else
+ sk = NULL;
+
+ if (!sk) {
+ err = -ENODEV;
+ goto out_unlock;
}
- put_net(net);
- return ipv4_pdp_add(dev, info);
+ err = ipv4_pdp_add(gtp, sk, info);
+
+out_unlock:
+ rcu_read_unlock();
+ return err;
}
-static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
+ struct nlattr *nla[])
{
- struct net_device *dev;
- struct pdp_ctx *pctx;
struct gtp_dev *gtp;
- struct net *net;
- if (!info->attrs[GTPA_VERSION] ||
- !info->attrs[GTPA_LINK])
- return -EINVAL;
+ gtp = gtp_find_dev(net, nla);
+ if (!gtp)
+ return ERR_PTR(-ENODEV);
- net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
- if (IS_ERR(net))
- return PTR_ERR(net);
+ if (nla[GTPA_MS_ADDRESS]) {
+ __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
- /* Check if there's an existing gtpX device to configure */
- dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
- if (dev == NULL) {
- put_net(net);
- return -ENODEV;
+ return ipv4_pdp_find(gtp, ip);
+ } else if (nla[GTPA_VERSION]) {
+ u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
+
+ if (gtp_version == GTP_V0 && nla[GTPA_TID])
+ return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
+ else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
+ return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
}
- put_net(net);
- gtp = netdev_priv(dev);
+ return ERR_PTR(-EINVAL);
+}
- switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
- case GTP_V0:
- if (!info->attrs[GTPA_TID])
- return -EINVAL;
- pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
- break;
- case GTP_V1:
- if (!info->attrs[GTPA_I_TEI])
- return -EINVAL;
- pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI]));
- break;
+static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
+{
+ struct pdp_ctx *pctx;
- default:
+ if (nla[GTPA_LINK])
+ pctx = gtp_find_pdp_by_link(net, nla);
+ else
+ pctx = ERR_PTR(-EINVAL);
+
+ if (!pctx)
+ pctx = ERR_PTR(-ENOENT);
+
+ return pctx;
+}
+
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pdp_ctx *pctx;
+ int err = 0;
+
+ if (!info->attrs[GTPA_VERSION])
return -EINVAL;
- }
- if (pctx == NULL)
- return -ENOENT;
+ rcu_read_lock();
+
+ pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
+ goto out_unlock;
+ }
if (pctx->gtp_version == GTP_V0)
- netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+ netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
pctx->u.v0.tid, pctx);
else if (pctx->gtp_version == GTP_V1)
- netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+ netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- hlist_del_rcu(&pctx->hlist_tid);
- hlist_del_rcu(&pctx->hlist_addr);
- kfree_rcu(pctx, rcu_head);
+ pdp_context_delete(pctx);
- return 0;
+out_unlock:
+ rcu_read_unlock();
+ return err;
}
static struct genl_family gtp_genl_family;
@@ -1117,59 +1154,17 @@ nla_put_failure:
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx = NULL;
- struct net_device *dev;
struct sk_buff *skb2;
- struct gtp_dev *gtp;
- u32 gtp_version;
- struct net *net;
int err;
- if (!info->attrs[GTPA_VERSION] ||
- !info->attrs[GTPA_LINK])
+ if (!info->attrs[GTPA_VERSION])
return -EINVAL;
- gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
- switch (gtp_version) {
- case GTP_V0:
- case GTP_V1:
- break;
- default:
- return -EINVAL;
- }
-
- net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
- if (IS_ERR(net))
- return PTR_ERR(net);
-
- /* Check if there's an existing gtpX device to configure */
- dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
- if (dev == NULL) {
- put_net(net);
- return -ENODEV;
- }
- put_net(net);
-
- gtp = netdev_priv(dev);
-
rcu_read_lock();
- if (gtp_version == GTP_V0 &&
- info->attrs[GTPA_TID]) {
- u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
-
- pctx = gtp0_pdp_find(gtp, tid);
- } else if (gtp_version == GTP_V1 &&
- info->attrs[GTPA_I_TEI]) {
- u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
-
- pctx = gtp1_pdp_find(gtp, tid);
- } else if (info->attrs[GTPA_MS_ADDRESS]) {
- __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
-
- pctx = ipv4_pdp_find(gtp, ip);
- }
- if (pctx == NULL) {
- err = -ENOENT;
+ pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
goto err_unlock;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index f9f3dba7a588..b09c4fca1805 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -196,6 +196,7 @@ int netvsc_recv_callback(struct net_device *net,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
+int netvsc_poll(struct napi_struct *napi, int budget);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -722,6 +723,7 @@ struct net_device_context {
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
+ struct napi_struct napi;
struct multi_send_data msd;
struct multi_recv_comp mrc;
atomic_t queue_sends;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4c1d8cca247b..0e71164849dd 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -91,15 +91,6 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
}
-static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
- u16 q_idx)
-{
- const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
-
- return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
- atomic_read(&nvchan->queue_sends) == 0;
-}
-
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
@@ -556,6 +547,7 @@ void netvsc_device_remove(struct hv_device *device)
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *net_device = net_device_ctx->nvdev;
+ int i;
netvsc_disconnect_vsp(device);
@@ -570,6 +562,9 @@ void netvsc_device_remove(struct hv_device *device)
/* Now, we can close the channel safely */
vmbus_close(device->channel);
+ for (i = 0; i < net_device->num_chn; i++)
+ napi_disable(&net_device->chan_table[i].napi);
+
/* Release all resources */
free_netvsc_device(net_device);
}
@@ -600,9 +595,9 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
- struct vmpacket_descriptor *packet)
+ const struct vmpacket_descriptor *desc)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
@@ -647,14 +642,11 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
static void netvsc_send_completion(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
- struct vmpacket_descriptor *packet)
+ const struct vmpacket_descriptor *desc)
{
- struct nvsp_message *nvsp_packet;
+ struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
struct net_device *ndev = hv_get_drvdata(device);
- nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
- (packet->offset8 << 3));
-
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
@@ -668,7 +660,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
netvsc_send_tx_complete(net_device, incoming_channel,
- device, packet);
+ device, desc);
break;
default:
@@ -1066,28 +1058,29 @@ static inline struct recv_comp_data *get_recv_comp_slot(
return rcd;
}
-static void netvsc_receive(struct net_device *ndev,
+static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct net_device_context *net_device_ctx,
struct hv_device *device,
struct vmbus_channel *channel,
- struct vmtransfer_page_packet_header *vmxferpage_packet,
+ const struct vmpacket_descriptor *desc,
struct nvsp_message *nvsp)
{
+ const struct vmtransfer_page_packet_header *vmxferpage_packet
+ = container_of(desc, const struct vmtransfer_page_packet_header, d);
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
int ret;
- struct recv_comp_data *rcd;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
/* Make sure this is a valid nvsp packet */
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
netif_err(net_device_ctx, rx_err, ndev,
"Unknown nvsp packet type received %u\n",
nvsp->hdr.msg_type);
- return;
+ return 0;
}
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
@@ -1095,7 +1088,7 @@ static void netvsc_receive(struct net_device *ndev,
"Invalid xfer page set id - expecting %x got %x\n",
NETVSC_RECEIVE_BUFFER_ID,
vmxferpage_packet->xfer_pageset_id);
- return;
+ return 0;
}
count = vmxferpage_packet->range_cnt;
@@ -1111,26 +1104,26 @@ static void netvsc_receive(struct net_device *ndev,
channel, data, buflen);
}
- if (!net_device->chan_table[q_idx].mrc.buf) {
+ if (net_device->chan_table[q_idx].mrc.buf) {
+ struct recv_comp_data *rcd;
+
+ rcd = get_recv_comp_slot(net_device, channel, q_idx);
+ if (rcd) {
+ rcd->tid = vmxferpage_packet->d.trans_id;
+ rcd->status = status;
+ } else {
+ netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
+ q_idx, vmxferpage_packet->d.trans_id);
+ }
+ } else {
ret = netvsc_send_recv_completion(channel,
vmxferpage_packet->d.trans_id,
status);
if (ret)
netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
q_idx, vmxferpage_packet->d.trans_id, ret);
- return;
- }
-
- rcd = get_recv_comp_slot(net_device, channel, q_idx);
-
- if (!rcd) {
- netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
- q_idx, vmxferpage_packet->d.trans_id);
- return;
}
-
- rcd->tid = vmxferpage_packet->d.trans_id;
- rcd->status = status;
+ return count;
}
static void netvsc_send_table(struct hv_device *hdev,
@@ -1176,17 +1169,15 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
}
}
-static void netvsc_process_raw_pkt(struct hv_device *device,
- struct vmbus_channel *channel,
- struct netvsc_device *net_device,
- struct net_device *ndev,
- u64 request_id,
- struct vmpacket_descriptor *desc)
+static int netvsc_process_raw_pkt(struct hv_device *device,
+ struct vmbus_channel *channel,
+ struct netvsc_device *net_device,
+ struct net_device *ndev,
+ u64 request_id,
+ const struct vmpacket_descriptor *desc)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct nvsp_message *nvmsg
- = (struct nvsp_message *)((unsigned long)desc
- + (desc->offset8 << 3));
+ struct nvsp_message *nvmsg = hv_pkt_data(desc);
switch (desc->type) {
case VM_PKT_COMP:
@@ -1194,10 +1185,8 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(ndev, net_device, net_device_ctx,
- device, channel,
- (struct vmtransfer_page_packet_header *)desc,
- nvmsg);
+ return netvsc_receive(ndev, net_device, net_device_ctx,
+ device, channel, desc, nvmsg);
break;
case VM_PKT_DATA_INBAND:
@@ -1209,47 +1198,73 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
desc->type, request_id);
break;
}
+
+ return 0;
+}
+
+static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
+{
+ struct vmbus_channel *primary = channel->primary_channel;
+
+ return primary ? primary->device_obj : channel->device_obj;
+}
+
+int netvsc_poll(struct napi_struct *napi, int budget)
+{
+ struct netvsc_channel *nvchan
+ = container_of(napi, struct netvsc_channel, napi);
+ struct vmbus_channel *channel = nvchan->channel;
+ struct hv_device *device = netvsc_channel_to_device(channel);
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
+ const struct vmpacket_descriptor *desc;
+ int work_done = 0;
+
+ desc = hv_pkt_iter_first(channel);
+ while (desc) {
+ int count;
+
+ count = netvsc_process_raw_pkt(device, channel, net_device,
+ ndev, desc->trans_id, desc);
+ work_done += count;
+ desc = __hv_pkt_iter_next(channel, desc);
+
+ /* If receive packet budget is exhausted, reschedule */
+ if (work_done >= budget) {
+ work_done = budget;
+ break;
+ }
+ }
+ hv_pkt_iter_close(channel);
+
+ /* If ring is empty and NAPI is not doing polling */
+ if (work_done < budget &&
+ napi_complete_done(napi, work_done) &&
+ hv_end_read(&channel->inbound) != 0)
+ napi_reschedule(napi);
+
+ netvsc_chk_recv_comp(net_device, channel, q_idx);
+ return work_done;
}
void netvsc_channel_cb(void *context)
{
struct vmbus_channel *channel = context;
+ struct hv_device *device = netvsc_channel_to_device(channel);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
- struct hv_device *device;
struct netvsc_device *net_device;
- struct vmpacket_descriptor *desc;
struct net_device *ndev;
- bool need_to_commit = false;
-
- if (channel->primary_channel != NULL)
- device = channel->primary_channel->device_obj;
- else
- device = channel->device_obj;
ndev = hv_get_drvdata(device);
if (unlikely(!ndev))
return;
- net_device = net_device_to_netvsc_device(ndev);
- if (unlikely(net_device->destroy) &&
- netvsc_channel_idle(net_device, q_idx))
- return;
-
- /* commit_rd_index() -> hv_signal_on_read() needs this. */
- init_cached_read_index(channel);
-
- while ((desc = get_next_pkt_raw(channel)) != NULL) {
- netvsc_process_raw_pkt(device, channel, net_device,
- ndev, desc->trans_id, desc);
-
- put_pkt_raw(channel, desc);
- need_to_commit = true;
- }
-
- if (need_to_commit)
- commit_rd_index(channel);
+ /* disable interrupts from host */
+ hv_begin_read(&channel->inbound);
- netvsc_chk_recv_comp(net_device, channel, q_idx);
+ net_device = net_device_to_netvsc_device(ndev);
+ napi_schedule(&net_device->chan_table[q_idx].napi);
}
/*
@@ -1271,6 +1286,11 @@ int netvsc_device_add(struct hv_device *device,
net_device->ring_size = ring_size;
+ /* Because the device uses NAPI, all the interrupt batching and
+ * control is done via Net softirq, not the channel handling
+ */
+ set_channel_read_mode(device->channel, HV_CALL_ISR);
+
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
@@ -1288,8 +1308,16 @@ int netvsc_device_add(struct hv_device *device,
* chn_table with the default channel to use it before subchannels are
* opened.
*/
- for (i = 0; i < VRSS_CHANNEL_MAX; i++)
- net_device->chan_table[i].channel = device->channel;
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
+
+ nvchan->channel = device->channel;
+ netif_napi_add(ndev, &nvchan->napi,
+ netvsc_poll, NAPI_POLL_WEIGHT);
+ }
+
+ /* Enable NAPI handler for init callbacks */
+ napi_enable(&net_device->chan_table[0].napi);
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.
@@ -1309,6 +1337,8 @@ int netvsc_device_add(struct hv_device *device,
return ret;
close:
+ napi_disable(&net_device->chan_table[0].napi);
+
/* Now, we can close the channel safely */
vmbus_close(device->channel);
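
The netvsc changes above follow the standard NAPI conversion: the channel callback only masks further host interrupts and schedules NAPI, while the poll routine drains the ring up to the budget and re-arms interrupts through napi_complete_done(). A driver-agnostic sketch of that contract, with hypothetical my_dev_*() helpers standing in for the hardware specifics:

/* Sketch of the generic NAPI contract assumed by the conversion above.
 * The my_dev_*() helpers are hypothetical placeholders.
 */
static irqreturn_t my_dev_isr(int irq, void *data)
{
        struct my_dev *priv = data;

        my_dev_mask_irq(priv);          /* like hv_begin_read() above */
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}

static int my_dev_poll(struct napi_struct *napi, int budget)
{
        struct my_dev *priv = container_of(napi, struct my_dev, napi);
        int work_done = 0;

        while (work_done < budget && my_dev_rx_one(priv))
                work_done++;

        /* Re-enable interrupts only once the ring is drained and NAPI
         * agrees polling is complete; otherwise stay in polling mode.
         */
        if (work_done < budget && napi_complete_done(napi, work_done))
                my_dev_unmask_irq(priv);

        return work_done;
}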
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5ede87f30463..191372486a87 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -584,13 +584,14 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ struct napi_struct *napi,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan,
void *data, u32 buflen)
{
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(net, buflen);
+ skb = napi_alloc_skb(napi, buflen);
if (!skb)
return skb;
@@ -637,11 +638,11 @@ int netvsc_recv_callback(struct net_device *net,
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device = net_device_ctx->nvdev;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
struct net_device *vf_netdev;
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
-
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
@@ -659,7 +660,8 @@ int netvsc_recv_callback(struct net_device *net,
net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
+ skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
+ csum_info, vlan, data, len);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
rcu_read_unlock();
@@ -674,7 +676,7 @@ int netvsc_recv_callback(struct net_device *net,
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = &net_device->chan_table[q_idx].rx_stats;
+ rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += len;
@@ -685,12 +687,7 @@ int netvsc_recv_callback(struct net_device *net,
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
- /*
- * Pass the skb back up. Network stack will deallocate the skb when it
- * is done.
- * TODO - use NAPI?
- */
- netif_receive_skb(skb);
+ napi_gro_receive(&nvchan->napi, skb);
rcu_read_unlock();
return 0;
@@ -787,18 +784,19 @@ static int netvsc_set_channels(struct net_device *net,
return ret;
}
-static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
+static bool
+netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
- struct ethtool_cmd diff1 = *cmd;
- struct ethtool_cmd diff2 = {};
+ struct ethtool_link_ksettings diff1 = *cmd;
+ struct ethtool_link_ksettings diff2 = {};
- ethtool_cmd_speed_set(&diff1, 0);
- diff1.duplex = 0;
+ diff1.base.speed = 0;
+ diff1.base.duplex = 0;
/* advertising and cmd are usually set */
- diff1.advertising = 0;
- diff1.cmd = 0;
+ ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
+ diff1.base.cmd = 0;
/* We set port to PORT_OTHER */
- diff2.port = PORT_OTHER;
+ diff2.base.port = PORT_OTHER;
return !memcmp(&diff1, &diff2, sizeof(diff1));
}
@@ -811,30 +809,32 @@ static void netvsc_init_settings(struct net_device *dev)
ndc->duplex = DUPLEX_UNKNOWN;
}
-static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netvsc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
- ethtool_cmd_speed_set(cmd, ndc->speed);
- cmd->duplex = ndc->duplex;
- cmd->port = PORT_OTHER;
+ cmd->base.speed = ndc->speed;
+ cmd->base.duplex = ndc->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
-static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netvsc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
u32 speed;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->duplex) ||
+ !ethtool_validate_duplex(cmd->base.duplex) ||
!netvsc_validate_ethtool_ss_cmd(cmd))
return -EINVAL;
ndc->speed = speed;
- ndc->duplex = cmd->duplex;
+ ndc->duplex = cmd->base.duplex;
return 0;
}
@@ -1168,13 +1168,13 @@ static const struct ethtool_ops ethtool_ops = {
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_settings = netvsc_get_settings,
- .set_settings = netvsc_set_settings,
.get_rxnfc = netvsc_get_rxnfc,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
+ .get_link_ksettings = netvsc_get_link_ksettings,
+ .set_link_ksettings = netvsc_set_link_ksettings,
};
static const struct net_device_ops device_ops = {
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 19356f56b7b1..d7b6311e6c19 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1012,6 +1012,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (ret == 0)
nvscdev->chan_table[chn_index].channel = new_sc;
+ napi_enable(&nvscdev->chan_table[chn_index].napi);
+
spin_lock_irqsave(&nvscdev->sc_lock, flags);
nvscdev->num_sc_offered--;
spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 36877ba65516..4daf3d0926a8 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -372,18 +372,19 @@ static void ntb_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
-static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ntb_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = SUPPORTED_Backplane;
- cmd->advertising = ADVERTISED_Backplane;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_OTHER;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_DUMMY1;
- cmd->autoneg = AUTONEG_ENABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);
+
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_ENABLE;
return 0;
}
@@ -391,7 +392,7 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static const struct ethtool_ops ntb_ethtool_ops = {
.get_drvinfo = ntb_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_settings = ntb_get_settings,
+ .get_link_ksettings = ntb_get_link_ksettings,
};
static const struct ntb_queue_handlers ntb_netdev_handlers = {
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index ab9ad689617c..9656dbeb5de5 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -221,9 +221,9 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
return val;
if (enable)
- val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+ val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
else
- val &= ~(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+ val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
MDIO_MMD_AN, (u32)val);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index d1c2614dad3a..caa9f6e17f34 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -1,7 +1,7 @@
/*
* Broadcom BCM7xxx internal transceivers support.
*
- * Copyright (C) 2014, Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,7 +19,7 @@
/* Broadcom BCM7xxx internal PHY registers */
-/* 40nm only register definitions */
+/* EPHY only register definitions */
#define MII_BCM7XXX_100TX_AUX_CTL 0x10
#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
#define MII_BCM7XXX_100TX_DISC 0x14
@@ -27,6 +27,19 @@
#define MII_BCM7XXX_64CLK_MDIO BIT(12)
#define MII_BCM7XXX_TEST 0x1f
#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
+#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
+#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
+#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
+#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
+#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
+#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
+#define MII_BCM7XXX_SHD_3_AN_STAT 0xb
+#define MII_BCM7XXX_AN_NULL_MSG_EN BIT(0)
+#define MII_BCM7XXX_AN_EEE_EN BIT(1)
+#define MII_BCM7XXX_SHD_3_EEE_THRESH 0xe
+#define MII_BCM7XXX_EEE_THRESH_DEF 0x50
+#define MII_BCM7XXX_SHD_3_TL4 0x23
+#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1))
/* 28nm only register definitions */
#define MISC_ADDR(base, channel) base, channel
@@ -286,6 +299,181 @@ static int phy_set_clr_bits(struct phy_device *dev, int location,
return v;
}
+static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set current trim values INT_trim = -1, Ext_trim =0 */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_BIAS_TRIM, 0x3BE0);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Cal reset */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_TL4);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_TL4_RST_MSK, 0);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Cal reset disable */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_TL4);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ 0, MII_BCM7XXX_TL4_RST_MSK);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The 28nm EPHY does not support Clause 45 (MMD) used by bcm-phy-lib */
+static int bcm7xxx_28nm_ephy_apd_enable(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 1 */
+ ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST,
+ MII_BRCM_FET_BT_SRE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Enable auto-power down */
+ ret = phy_set_clr_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
+ MII_BRCM_FET_SHDW_AS2_APDE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* reset shadow mode 1 */
+ ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST, 0,
+ MII_BRCM_FET_BT_SRE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_ephy_eee_enable(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Advertise supported modes */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_AN_EEE_ADV);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MDIO_EEE_100TX);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Restore Defaults */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_PCS_CTRL_2);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_PCS_CTRL_2_DEF);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_EEE_THRESH);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_EEE_THRESH_DEF);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Enable EEE autonegotiation */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_AN_STAT);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ (MII_BCM7XXX_AN_NULL_MSG_EN | MII_BCM7XXX_AN_EEE_EN));
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ /* Restart autoneg */
+ phy_write(phydev, MII_BMCR,
+ (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_ANRESTART));
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
+{
+ u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+ int ret = 0;
+
+ pr_info_once("%s: %s PHY revision: 0x%02x\n",
+ phydev_name(phydev), phydev->drv->name, rev);
+
+ /* Dummy read to a register to work around a possible issue upon reset
+ * where the internal inverter may not allow the first MDIO transaction
+ * to pass the MDIO management controller and make us return 0xffff for
+ * such reads.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ /* Apply AFE software work-around if necessary */
+ if (rev == 0x01) {
+ ret = bcm7xxx_28nm_ephy_01_afe_config_init(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = bcm7xxx_28nm_ephy_eee_enable(phydev);
+ if (ret)
+ return ret;
+
+ return bcm7xxx_28nm_ephy_apd_enable(phydev);
+}
+
+static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds coming out of suspend/resume */
+ ret = bcm7xxx_28nm_ephy_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
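
The EPHY init code above repeats one indirect-access sequence: enter shadow mode 2 through MII_BCM7XXX_TEST, select a shadow-3 register via MII_BCM7XXX_SHD_2_ADDR_CTRL, write its value via MII_BCM7XXX_SHD_2_CTRL_STAT, then leave shadow mode 2. A hedged helper capturing that sequence (a sketch, not part of the patch):

/* Sketch: indirect write of one shadow-3 register, following the
 * sequence used by bcm7xxx_28nm_ephy_eee_enable() above.
 */
static int sketch_ephy_shd3_write(struct phy_device *phydev,
                                  u16 regnum, u16 val)
{
        int ret;

        /* enter shadow mode 2 */
        ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
                               MII_BCM7XXX_SHD_MODE_2, 0);
        if (ret < 0)
                return ret;

        /* select the target shadow-3 register, then write its value */
        ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, regnum);
        if (ret >= 0)
                ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);

        /* leave shadow mode 2 even if the write failed */
        phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);

        return ret < 0 ? ret : 0;
}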
static int bcm7xxx_config_init(struct phy_device *phydev)
{
int ret;
@@ -434,6 +622,23 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.probe = bcm7xxx_28nm_probe, \
}
+#define BCM7XXX_28NM_EPHY(_oui, _name) \
+{ \
+ .phy_id = (_oui), \
+ .phy_id_mask = 0xfffffff0, \
+ .name = _name, \
+ .features = PHY_BASIC_FEATURES, \
+ .flags = PHY_IS_INTERNAL, \
+ .config_init = bcm7xxx_28nm_ephy_config_init, \
+ .config_aneg = genphy_config_aneg, \
+ .read_status = genphy_read_status, \
+ .resume = bcm7xxx_28nm_ephy_resume, \
+ .get_sset_count = bcm_phy_get_sset_count, \
+ .get_strings = bcm_phy_get_strings, \
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
+ .probe = bcm7xxx_28nm_probe, \
+}
+
#define BCM7XXX_40NM_EPHY(_oui, _name) \
{ \
.phy_id = (_oui), \
@@ -450,6 +655,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
@@ -466,6 +674,9 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7260, 0xfffffff0, },
+ { PHY_ID_BCM7268, 0xfffffff0, },
+ { PHY_ID_BCM7271, 0xfffffff0, },
{ PHY_ID_BCM7278, 0xfffffff0, },
{ PHY_ID_BCM7364, 0xfffffff0, },
{ PHY_ID_BCM7366, 0xfffffff0, },
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8c73b2e771dd..34395230ce70 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -1,7 +1,7 @@
/*
* Broadcom UniMAC MDIO bus controller driver
*
- * Copyright (C) 2014, Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -228,6 +228,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },
{ .compatible = "brcm,genet-mdio-v2", },
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 34cc3c590aa5..16dfb4cb1980 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2442,18 +2442,16 @@ static struct miscdevice tun_miscdev = {
/* ethtool interface */
-static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- cmd->supported = 0;
- cmd->advertising = 0;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+static int tun_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -2516,7 +2514,6 @@ static int tun_set_coalesce(struct net_device *dev,
}
static const struct ethtool_ops tun_ethtool_ops = {
- .get_settings = tun_get_settings,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
@@ -2524,6 +2521,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = tun_get_coalesce,
.set_coalesce = tun_set_coalesce,
+ .get_link_ksettings = tun_get_link_ksettings,
};
static int tun_queue_resize(struct tun_struct *tun)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index a3a7db0702d8..4a0ae7ce83f6 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -620,16 +620,18 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
return 0;
}
-static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88179_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
- return mii_ethtool_gset(&dev->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
}
-static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88179_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
- return mii_ethtool_sset(&dev->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&dev->mii, cmd);
}
static int
@@ -826,11 +828,11 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
.set_wol = ax88179_set_wol,
.get_eeprom_len = ax88179_get_eeprom_len,
.get_eeprom = ax88179_get_eeprom,
- .get_settings = ax88179_get_settings,
- .set_settings = ax88179_set_settings,
.get_eee = ax88179_get_eee,
.set_eee = ax88179_set_eee,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = ax88179_get_link_ksettings,
+ .set_link_ksettings = ax88179_set_link_ksettings,
};
static void ax88179_set_multicast(struct net_device *net)
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 0acc9b640419..fce92f0e5abd 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -688,29 +688,34 @@ static void catc_get_drvinfo(struct net_device *dev,
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
-static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int catc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct catc *catc = netdev_priv(dev);
if (!catc->is_f5u011)
return -EOPNOTSUPP;
- cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
- cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = DUPLEX_HALF;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
return 0;
}
static const struct ethtool_ops ops = {
.get_drvinfo = catc_get_drvinfo,
- .get_settings = catc_get_settings,
- .get_link = ethtool_op_get_link
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = catc_get_link_ksettings,
};
/*
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 986243c932cc..227e1fdd9228 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3800,7 +3800,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
}
static
-int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+int rtl8152_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct r8152 *tp = netdev_priv(netdev);
int ret;
@@ -3814,7 +3815,7 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
mutex_lock(&tp->control);
- ret = mii_ethtool_gset(&tp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd);
mutex_unlock(&tp->control);
@@ -3824,7 +3825,8 @@ out:
return ret;
}
-static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8152_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct r8152 *tp = netdev_priv(dev);
int ret;
@@ -3835,11 +3837,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
mutex_lock(&tp->control);
- ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex);
if (!ret) {
- tp->autoneg = cmd->autoneg;
- tp->speed = cmd->speed;
- tp->duplex = cmd->duplex;
+ tp->autoneg = cmd->base.autoneg;
+ tp->speed = cmd->base.speed;
+ tp->duplex = cmd->base.duplex;
}
mutex_unlock(&tp->control);
@@ -4117,8 +4120,6 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
static const struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
- .get_settings = rtl8152_get_settings,
- .set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
.nway_reset = rtl8152_nway_reset,
.get_msglevel = rtl8152_get_msglevel,
@@ -4132,6 +4133,8 @@ static const struct ethtool_ops ops = {
.set_coalesce = rtl8152_set_coalesce,
.get_eee = rtl_ethtool_get_eee,
.set_eee = rtl_ethtool_set_eee,
+ .get_link_ksettings = rtl8152_get_link_ksettings,
+ .set_link_ksettings = rtl8152_set_link_ksettings,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index c81c79110cef..daaa88a66f40 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -791,47 +791,52 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
-static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int rtl8150_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
short lpa, bmcr;
+ u32 supported;
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP | SUPPORTED_MII);
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = dev->phy;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.phy_address = dev->phy;
get_registers(dev, BMCR, 2, &bmcr);
get_registers(dev, ANLP, 2, &lpa);
if (bmcr & BMCR_ANENABLE) {
u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ?
SPEED_100 : SPEED_10);
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->base.speed = speed;
+ ecmd->base.autoneg = AUTONEG_ENABLE;
if (speed == SPEED_100)
- ecmd->duplex = (lpa & LPA_100FULL) ?
+ ecmd->base.duplex = (lpa & LPA_100FULL) ?
DUPLEX_FULL : DUPLEX_HALF;
else
- ecmd->duplex = (lpa & LPA_10FULL) ?
+ ecmd->base.duplex = (lpa & LPA_10FULL) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- ecmd->autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10));
- ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ ecmd->base.speed = ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10);
+ ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+
return 0;
}
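
rtl8150_get_link_ksettings() above keeps building the supported mask as a legacy u32 and converts it with ethtool_convert_legacy_u32_to_link_mode(). A hedged sketch of the same approach for a hypothetical fixed-link device that also reports an advertising mask:

/* Sketch only: converting legacy SUPPORTED_ / ADVERTISED_ masks into
 * ethtool_link_ksettings bitmaps for a hypothetical fixed-link device.
 */
static int sketch_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
{
        u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_TP;
        u32 advertising = ADVERTISED_100baseT_Full | ADVERTISED_TP;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        cmd->base.speed = SPEED_100;
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.port = PORT_TP;
        cmd->base.autoneg = AUTONEG_DISABLE;

        return 0;
}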
static const struct ethtool_ops ops = {
.get_drvinfo = rtl8150_get_drvinfo,
- .get_settings = rtl8150_get_settings,
- .get_link = ethtool_op_get_link
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = rtl8150_get_link_ksettings,
};
static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bdb6ae16d4a8..09855be219e9 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -276,9 +276,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
send_eth = send_ip = true;
if (type == RTM_GETNEIGH) {
- ndm->ndm_family = AF_INET;
send_ip = !vxlan_addr_any(&rdst->remote_ip);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
+ ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
index 64e9f507ce32..414f2a772a5f 100644
--- a/drivers/scsi/qedf/Makefile
+++ b/drivers/scsi/qedf/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDF) := qedf.o
qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
- qedf_attr.o qedf_els.o
+ qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644
index 000000000000..bb812db48da6
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -0,0 +1,190 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_fcoe_fw_funcs.h"
+#include "drv_scsi_fw_funcs.h"
+
+#define FCOE_RX_ID ((u32)0x0000FFFF)
+
+static inline void init_common_sqe(struct fcoe_task_params *task_params,
+ enum fcoe_sqe_request_type request_type)
+{
+ memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
+ SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
+ request_type);
+ task_params->sqe->task_id = task_params->itid;
+}
+
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct regpair sense_data_buffer_phys_addr,
+ u32 task_retry_id,
+ u8 fcp_cmd_payload[32])
+{
+ struct fcoe_task_context *ctx = task_params->context;
+ struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+ struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+ u32 io_size, val;
+ bool slow_sgl;
+
+ memset(ctx, 0, sizeof(*(ctx)));
+ slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+ io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
+ task_params->tx_io_size : task_params->rx_io_size);
+
+ /* Ystorm ctx */
+ y_st_ctx = &ctx->ystorm_st_context;
+ y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+ y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
+ y_st_ctx->task_type = task_params->task_type;
+ memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
+ fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
+
+ /* Tstorm ctx */
+ t_st_ctx = &ctx->tstorm_st_context;
+ t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
+ FCOE_TASK_DEV_TYPE_TAPE :
+ FCOE_TASK_DEV_TYPE_DISK);
+ t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+ val = cpu_to_le32(task_params->cq_rss_number);
+ t_st_ctx->read_only.glbl_q_num = val;
+ t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
+ t_st_ctx->read_only.task_type = task_params->task_type;
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+ t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+ /* Ustorm ctx */
+ u_ag_ctx = &ctx->ustorm_ag_context;
+ u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+ /* Mstorm buffer for sense/rsp data placement */
+ m_st_ctx = &ctx->mstorm_st_context;
+ val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
+ m_st_ctx->rsp_buf_addr.hi = val;
+ val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
+ m_st_ctx->rsp_buf_addr.lo = val;
+
+ if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+ /* Ystorm ctx */
+ y_st_ctx->expect_first_xfer = 1;
+
+ /* Set the number of super SGEs. Can be up to 4. */
+ SET_FIELD(y_st_ctx->sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ init_scsi_sgl_context(&y_st_ctx->sgl_params,
+ &y_st_ctx->data_desc,
+ sgl_task_params);
+
+ /* Mstorm ctx */
+ SET_FIELD(m_st_ctx->flags,
+ MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ } else {
+ /* Tstorm ctx */
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+
+ /* Mstorm ctx */
+ m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+ init_scsi_sgl_context(&m_st_ctx->sgl_params,
+ &m_st_ctx->data_desc,
+ sgl_task_params);
+ }
+
+ init_common_sqe(task_params, SEND_FCOE_CMD);
+ return 0;
+}
+
+int init_initiator_midpath_unsolicited_fcoe_task(
+ struct fcoe_task_params *task_params,
+ struct fcoe_tx_mid_path_params *mid_path_fc_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params,
+ u8 fw_to_place_fc_header)
+{
+ struct fcoe_task_context *ctx = task_params->context;
+ struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+ struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+ u32 val;
+
+ memset(ctx, 0, sizeof(*(ctx)));
+
+ /* Init Ystorm */
+ y_st_ctx = &ctx->ystorm_st_context;
+ init_scsi_sgl_context(&y_st_ctx->sgl_params,
+ &y_st_ctx->data_desc,
+ tx_sgl_task_params);
+ SET_FIELD(y_st_ctx->sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
+ y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
+ y_st_ctx->task_type = task_params->task_type;
+ memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
+ mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
+
+ /* Init Mstorm */
+ m_st_ctx = &ctx->mstorm_st_context;
+ init_scsi_sgl_context(&m_st_ctx->sgl_params,
+ &m_st_ctx->data_desc,
+ rx_sgl_task_params);
+ SET_FIELD(m_st_ctx->flags,
+ MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
+ fw_to_place_fc_header);
+ m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
+
+ /* Init Tstorm */
+ t_st_ctx = &ctx->tstorm_st_context;
+ t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+ val = cpu_to_le32(task_params->cq_rss_number);
+ t_st_ctx->read_only.glbl_q_num = val;
+ t_st_ctx->read_only.task_type = task_params->task_type;
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+ t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+ /* Init Ustorm */
+ u_ag_ctx = &ctx->ustorm_ag_context;
+ u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+ /* Init SQE */
+ init_common_sqe(task_params, SEND_FCOE_MIDPATH);
+ task_params->sqe->additional_info_union.burst_length =
+ tx_sgl_task_params->total_buffer_size;
+ SET_FIELD(task_params->sqe->flags,
+ FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
+ SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
+ SCSI_FAST_SGL);
+
+ return 0;
+}
+
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
+{
+ init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
+ return 0;
+}
+
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
+{
+ init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
+ return 0;
+}
+
+int init_initiator_sequence_recovery_fcoe_task(
+ struct fcoe_task_params *task_params, u32 off)
+{
+ init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
+ task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+ return 0;
+}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644
index 000000000000..617529b058f4
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -0,0 +1,93 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _FCOE_FW_FUNCS_H
+#define _FCOE_FW_FUNCS_H
+#include "drv_scsi_fw_funcs.h"
+#include "qedf_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct fcoe_task_params {
+ /* Output parameter [set/filled by the HSI function] */
+ struct fcoe_task_context *context;
+
+ /* Output parameter [set/filled by the HSI function] */
+ struct fcoe_wqe *sqe;
+ enum fcoe_task_type task_type;
+ u32 tx_io_size; /* in bytes */
+ u32 rx_io_size; /* in bytes */
+ u32 conn_cid;
+ u16 itid;
+ u8 cq_rss_number;
+
+ /* Whether it's Tape device or not (0=Disk, 1=Tape) */
+ u8 is_tape_device;
+};
+
+/**
+ * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
+ * read/write task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param sgl_task_params - Pointer to SGL task params
+ * @param sense_data_buffer_phys_addr - Physical address of sense data buffer
+ * @param task_retry_id - retry identification - Used only for Tape device
+ * @param fcp_cmd_payload - FCP CMD payload
+ */
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct regpair sense_data_buffer_phys_addr,
+ u32 task_retry_id,
+ u8 fcp_cmd_payload[32]);
+
+/**
+ * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task
+ * context for midpath/unsolicited task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param mid_path_fc_header - FC header
+ * @param tx_sgl_task_params - Pointer to Tx SGL task params
+ * @param rx_sgl_task_params - Pointer to Rx SGL task params
+ * @param fw_to_place_fc_header - Indication if the FW will place the FC header
+ * in addition to the data that arrives.
+ */
+int init_initiator_midpath_unsolicited_fcoe_task(
+ struct fcoe_task_params *task_params,
+ struct fcoe_tx_mid_path_params *mid_path_fc_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params,
+ u8 fw_to_place_fc_header);
+
+/**
+ * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
+ * abort task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
+ * cleanup task types and init fcoe_sqe
+ *
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
+ * context for sequence recovery task types and init fcoe_sqe
+ *
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param desired_offset - The desired offset the task will be re-sent from
+ */
+int init_initiator_sequence_recovery_fcoe_task(
+ struct fcoe_task_params *task_params,
+ u32 desired_offset);
+#endif
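As a rough usage sketch (not part of the patch): a caller of these helpers reserves an SQE under the per-rport lock, points task_params->sqe at it, and rings the doorbell once the init function has filled the entry. The snippet below simply mirrors the ABTS path added to qedf_io.c later in this series; qedf_get_sqe_idx() and qedf_ring_doorbell() are the qedf driver helpers, so treat this as illustrative only.

	/* Illustrative sketch only -- mirrors the ABTS path in qedf_io.c */
	static void example_send_abts(struct qedf_rport *fcport,
				      struct qedf_ioreq *io_req)
	{
		struct fcoe_wqe *sqe;
		unsigned long flags;
		u16 sqe_idx;

		spin_lock_irqsave(&fcport->rport_lock, flags);

		/* Reserve the next SQE and hand it to the HSI helper */
		sqe_idx = qedf_get_sqe_idx(fcport);
		sqe = &fcport->sq[sqe_idx];
		memset(sqe, 0, sizeof(struct fcoe_wqe));
		io_req->task_params->sqe = sqe;

		/* For an abort, the helper only needs to fill the SQE */
		init_initiator_abort_fcoe_task(io_req->task_params);
		qedf_ring_doorbell(fcport);

		spin_unlock_irqrestore(&fcport->rport_lock, flags);
	}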
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644
index 000000000000..11e0cc082ec0
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -0,0 +1,44 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_scsi_fw_funcs.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params)
+{
+ /* no need to check for sgl_task_params->sgl validity */
+ u8 num_sges_to_init = sgl_task_params->num_sges >
+ SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
+ sgl_task_params->num_sges;
+ u8 sge_index;
+ u32 val;
+
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->total_buffer_size);
+ ctx_sgl_params->sgl_total_length = val;
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+ ctx_data_desc->sge[sge_index].sge_len = val;
+ }
+}
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644
index 000000000000..9cb45410bc45
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -0,0 +1,85 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _SCSI_FW_FUNCS_H
+#define _SCSI_FW_FUNCS_H
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/fcoe_common.h>
+
+struct scsi_sgl_task_params {
+ struct scsi_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+
+ /* true if SGL contains a small (< 4KB) SGE in middle (not 1st or last)
+ * -> relevant for tx only
+ */
+ bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+ u32 initial_ref_tag;
+ bool initial_ref_tag_is_valid;
+ u16 application_tag;
+ u16 application_tag_mask;
+ u16 dif_block_size_log;
+ bool dif_on_network;
+ bool dif_on_host;
+ u8 host_guard_type;
+ u8 protection_type;
+ u8 ref_tag_mask;
+ bool crc_seed;
+
+ /* Enable Connection error upon DIF error (segments with DIF errors are
+ * dropped)
+ */
+ bool tx_dif_conn_err_en;
+ bool ignore_app_tag;
+ bool keep_ref_tag_const;
+ bool validate_guard;
+ bool validate_app_tag;
+ bool validate_ref_tag;
+ bool forward_guard;
+ bool forward_app_tag;
+ bool forward_ref_tag;
+ bool forward_app_tag_with_mask;
+ bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+ /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
+ * pointer to the CDB buffer SGE
+ */
+ struct scsi_sge extended_cdb_sge;
+
+ /* Physical address of sense data buffer for sense data - 256B buffer */
+ struct regpair sense_data_buffer_phys_addr;
+};
+
+/**
+ * @brief scsi_is_slow_sgl - checks for slow SGL
+ *
+ * @param num_sges - number of sges in SGL
+ * @param small_mid_sge - True if the SGL contains an SGE which is smaller than
+ * 4KB and is not the 1st or last SGE in the SGL
+ */
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
+
+/**
+ * @brief init_scsi_sgl_context - initializes SGL task context
+ *
+ * @param sgl_params - SGL context parameters to initialize (output parameter)
+ * @param ctx_data_desc - context struct containing SGEs array to set (output
+ * parameter)
+ * @param sgl_task_params - SGL parameters (input)
+ */
+void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params);
+#endif
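For illustration only (not part of the patch): the qedf I/O path later in this series fills a scsi_sgl_task_params from the per-command BD table before handing it to the FCoE task-init helpers. A minimal sketch, assuming the U64_LO()/U64_HI() helpers from qedf.h and a scsi_sge BD table mapped at bd_tbl_dma:

	/* Illustrative sketch only -- populating an SGL descriptor */
	static void example_fill_sgl_params(struct scsi_sgl_task_params *p,
					    struct scsi_sge *bd_tbl,
					    dma_addr_t bd_tbl_dma,
					    u16 num_sges, u32 total_len)
	{
		p->sgl = bd_tbl;
		p->sgl_phys_addr.lo = U64_LO(bd_tbl_dma);
		p->sgl_phys_addr.hi = U64_HI(bd_tbl_dma);
		p->num_sges = num_sges;
		p->total_buffer_size = total_len;
		/* Tx-only hint; see scsi_is_slow_sgl() */
		p->small_mid_sge = false;
	}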
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 96346a1b1515..40aeb6bb96a2 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -26,6 +26,7 @@
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
+#include "drv_fcoe_fw_funcs.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
@@ -59,19 +60,17 @@
#define UPSTREAM_KEEP 1
struct qedf_mp_req {
- uint8_t tm_flags;
-
uint32_t req_len;
void *req_buf;
dma_addr_t req_buf_dma;
- struct fcoe_sge *mp_req_bd;
+ struct scsi_sge *mp_req_bd;
dma_addr_t mp_req_bd_dma;
struct fc_frame_header req_fc_hdr;
uint32_t resp_len;
void *resp_buf;
dma_addr_t resp_buf_dma;
- struct fcoe_sge *mp_resp_bd;
+ struct scsi_sge *mp_resp_bd;
dma_addr_t mp_resp_bd_dma;
struct fc_frame_header resp_fc_hdr;
};
@@ -119,6 +118,7 @@ struct qedf_ioreq {
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
u8 io_req_flags;
+ uint8_t tm_flags;
struct qedf_rport *fcport;
unsigned long flags;
enum qedf_ioreq_event event;
@@ -130,6 +130,8 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
+ struct fcoe_task_params *task_params;
+ struct scsi_sgl_task_params *sgl_task_params;
int idx;
/*
* Need to allocate enough room for both sense data and FCP response data
@@ -199,8 +201,8 @@ struct qedf_rport {
dma_addr_t sq_pbl_dma;
u32 sq_pbl_size;
u32 sid;
-#define QEDF_RPORT_TYPE_DISK 1
-#define QEDF_RPORT_TYPE_TAPE 2
+#define QEDF_RPORT_TYPE_DISK 0
+#define QEDF_RPORT_TYPE_TAPE 1
uint dev_type; /* Disk or tape */
struct list_head peers;
};
@@ -391,7 +393,7 @@ struct qedf_ctx {
struct io_bdt {
struct qedf_ioreq *io_req;
- struct fcoe_sge *bd_tbl;
+ struct scsi_sge *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
@@ -400,7 +402,7 @@ struct qedf_cmd_mgr {
struct qedf_ctx *qedf;
u16 idx;
struct io_bdt **io_bdt_pool;
-#define FCOE_PARAMS_NUM_TASKS 4096
+#define FCOE_PARAMS_NUM_TASKS 2048
struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
spinlock_t lock;
atomic_t free_list_cnt;
@@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx);
-extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
- u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59f3e5c73a13..c505d41f6dc8 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
uint16_t xid;
uint32_t start_time = jiffies / HZ;
uint32_t current_time;
+ struct fcoe_wqe *sqe;
+ unsigned long flags;
+ u16 sqe_idx;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
@@ -113,20 +116,25 @@ retry_els:
/* Obtain exchange id */
xid = els_req->xid;
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
- qedf_init_mp_task(els_req, task);
+ qedf_init_mp_task(els_req, task, sqe);
/* Put timer on original I/O request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
-
/* Ring doorbell */
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
"req\n");
qedf_ring_doorbell(fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
return rc;
}
@@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
struct qedf_rport *fcport;
unsigned long flags;
struct qedf_els_cb_arg *cb_arg;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
fcport = orig_io_req->fcport;
@@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, orig_io_req->xid, 0,
- FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ orig_io_req->task_params->sqe = sqe;
+
+ init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
+ offset);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 46debe5034af..1d7f90d0adc1 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
if (!cmgr->io_bdt_pool)
goto free_cmd_pool;
- bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
+ bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
if (bdt_info->bd_tbl) {
@@ -119,6 +119,8 @@ free_cmd_pool:
for (i = 0; i < num_ios; i++) {
io_req = &cmgr->cmds[i];
+ kfree(io_req->sgl_task_params);
+ kfree(io_req->task_params);
/* Make sure we free per command sense buffer */
if (io_req->sense_buffer)
dma_free_coherent(&qedf->pdev->dev,
@@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
spin_lock_init(&cmgr->lock);
/*
- * Initialize list of qedf_ioreq.
+ * Initialize I/O request fields.
*/
xid = QEDF_MIN_XID;
@@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
GFP_KERNEL);
if (!io_req->sense_buffer)
goto mem_err;
+
+ /* Allocate task parameters to pass to f/w init functions */
+ io_req->task_params = kzalloc(sizeof(*io_req->task_params),
+ GFP_KERNEL);
+ if (!io_req->task_params) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to allocate task_params for xid=0x%x\n",
+ i);
+ goto mem_err;
+ }
+
+ /*
+ * Allocate scatter/gather list info to pass to f/w init
+ * functions.
+ */
+ io_req->sgl_task_params = kzalloc(
+ sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
+ if (!io_req->sgl_task_params) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to allocate sgl_task_params for xid=0x%x\n",
+ i);
+ goto mem_err;
+ }
}
/* Allocate pool of io_bdts - one for each qedf_ioreq */
@@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
GFP_KERNEL);
if (!cmgr->io_bdt_pool[i]) {
- QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
- "io_bdt_pool[%d].\n", i);
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "Failed to alloc io_bdt_pool[%d].\n", i);
goto mem_err;
}
}
@@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
- QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
+ QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
&bdt_info->bd_tbl_dma, GFP_KERNEL);
if (!bdt_info->bd_tbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
- "bdt_tbl[%d].\n", i);
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "Failed to alloc bdt_tbl[%d].\n", i);
goto mem_err;
}
}
@@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
}
bd_tbl->io_req = io_req;
io_req->cmd_type = cmd_type;
+ io_req->tm_flags = 0;
/* Reset sequence offset data */
io_req->rx_buf_off = 0;
@@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_ctx *qedf = io_req->fcport->qedf;
- uint64_t sz = sizeof(struct fcoe_sge);
+ uint64_t sz = sizeof(struct scsi_sge);
/* clear tm flags */
- mp_req->tm_flags = 0;
if (mp_req->mp_req_bd) {
dma_free_coherent(&qedf->pdev->dev, sz,
mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@ -387,7 +412,7 @@ void qedf_release_cmd(struct kref *ref)
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
int bd_index)
{
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
frag_size = sg_len;
bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
- bd[bd_index + sg_frags].size = (uint16_t)frag_size;
+ bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
addr += (u64)frag_size;
sg_frags++;
@@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
struct scatterlist *sg;
int byte_count = 0;
int sg_count = 0;
@@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
bd[bd_count].sge_addr.hi = (addr >> 32);
- bd[bd_count].size = (u16)sg_len;
+ bd[bd_count].sge_len = (u16)sg_len;
return ++bd_count;
}
@@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
sg_frags = 1;
bd[bd_count].sge_addr.lo = U64_LO(addr);
bd[bd_count].sge_addr.hi = U64_HI(addr);
- bd[bd_count].size = (uint16_t)sg_len;
+ bd[bd_count].sge_len = (uint16_t)sg_len;
}
bd_count += sg_frags;
@@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
struct scsi_cmnd *sc = io_req->sc_cmd;
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int bd_count;
if (scsi_sg_count(sc)) {
@@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
} else {
bd_count = 0;
bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
- bd[0].size = 0;
+ bd[0].sge_len = 0;
}
io_req->bd_tbl->bd_valid = bd_count;
@@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
/* 4 bytes: flag info */
fcp_cmnd->fc_pri_ta = 0;
- fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_tm_flags = io_req->tm_flags;
fcp_cmnd->fc_flags = io_req->io_req_flags;
fcp_cmnd->fc_cmdref = 0;
/* Populate data direction */
- if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
- fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
- else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+ } else {
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
+ else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+ }
fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
/* 16 bytes: CDB information */
- memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
/* 4 bytes: FCP data length */
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
-
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, u32 *ptu_invalidate,
- struct fcoe_task_context *task_ctx)
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+ struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct io_bdt *bd_tbl = io_req->bd_tbl;
- union fcoe_data_desc_ctx *data_desc;
- u32 *fcp_cmnd;
+ u8 fcp_cmnd[32];
u32 tmp_fcp_cmnd[8];
- int cnt, i;
- int bd_count;
+ int bd_count = 0;
struct qedf_ctx *qedf = fcport->qedf;
uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
- u8 tmp_sgl_mode = 0;
- u8 mst_sgl_mode = 0;
+ struct regpair sense_data_buffer_phys_addr;
+ u32 tx_io_size = 0;
+ u32 rx_io_size = 0;
+ int i, cnt;
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ /* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
+ memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
- task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
- else
+ /* Set task type based on the DMA direction of the command */
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
task_type = FCOE_TASK_TYPE_READ_INITIATOR;
-
- /* Y Storm context */
- task_ctx->ystorm_st_context.expect_first_xfer = 1;
- task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
- /* Check if this is required */
- task_ctx->ystorm_st_context.ox_id = io_req->xid;
- task_ctx->ystorm_st_context.task_rety_identifier =
- io_req->task_retry_identifier;
-
- /* T Storm ag context */
- SET_FIELD(task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
- task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-
- /* T Storm st context */
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
- 1);
- task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-
- task_ctx->tstorm_st_context.read_only.dev_type =
- FCOE_TASK_DEV_TYPE_DISK;
- task_ctx->tstorm_st_context.read_only.conf_supported = 0;
- task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-
- /* Completion queue for response. */
- task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
- task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
- io_req->data_xfer_len;
- task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
- lport->e_d_tov;
-
- task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
- io_req->fp_idx = cq_idx;
-
- bd_count = bd_tbl->bd_valid;
- if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
- /* Setup WRITE task */
- struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
- task_ctx->ystorm_st_context.task_type =
- FCOE_TASK_TYPE_WRITE_INITIATOR;
- data_desc = &task_ctx->ystorm_st_context.data_desc;
-
- if (io_req->use_slowpath) {
- SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
- FCOE_SLOW_SGL);
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->slow.remainder_num_sges = bd_count;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
- } else {
- SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
- (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
- FCOE_MUL_FAST_SGES);
-
- if (bd_count == 1) {
- data_desc->single_sge.sge_addr.lo =
- fcoe_bd_tbl->sge_addr.lo;
- data_desc->single_sge.sge_addr.hi =
- fcoe_bd_tbl->sge_addr.hi;
- data_desc->single_sge.size =
- fcoe_bd_tbl->size;
- data_desc->single_sge.is_valid_sge = 0;
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else {
- data_desc->fast.sgl_start_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_start_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_byte_offset =
- data_desc->fast.sgl_start_addr.lo &
- (QEDF_PAGE_SIZE - 1);
- if (data_desc->fast.sgl_byte_offset > 0)
- QEDF_ERR(&(qedf->dbg_ctx),
- "byte_offset=%u for xid=0x%x.\n",
- io_req->xid,
- data_desc->fast.sgl_byte_offset);
- data_desc->fast.task_reuse_cnt =
- io_req->reuse_count;
- io_req->reuse_count++;
- if (io_req->reuse_count == QEDF_MAX_REUSE) {
- *ptu_invalidate = 1;
- io_req->reuse_count = 0;
- }
- qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
- }
-
- /* T Storm context */
- task_ctx->tstorm_st_context.read_only.task_type =
- FCOE_TASK_TYPE_WRITE_INITIATOR;
-
- /* M Storm context */
- tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
- SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
- tmp_sgl_mode);
-
} else {
- /* Setup READ task */
-
- /* M Storm context */
- struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
- data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
- task_ctx->mstorm_st_context.fp.data_2_trns_rem =
- io_req->data_xfer_len;
-
- if (io_req->use_slowpath) {
- SET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
- FCOE_SLOW_SGL);
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->slow.remainder_num_sges =
- bd_count;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+ tx_io_size = io_req->data_xfer_len;
} else {
- SET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
- (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
- FCOE_MUL_FAST_SGES);
-
- if (bd_count == 1) {
- data_desc->single_sge.sge_addr.lo =
- fcoe_bd_tbl->sge_addr.lo;
- data_desc->single_sge.sge_addr.hi =
- fcoe_bd_tbl->sge_addr.hi;
- data_desc->single_sge.size =
- fcoe_bd_tbl->size;
- data_desc->single_sge.is_valid_sge = 0;
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else {
- data_desc->fast.sgl_start_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_start_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_byte_offset = 0;
- data_desc->fast.task_reuse_cnt =
- io_req->reuse_count;
- io_req->reuse_count++;
- if (io_req->reuse_count == QEDF_MAX_REUSE) {
- *ptu_invalidate = 1;
- io_req->reuse_count = 0;
- }
- qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
+ task_type = FCOE_TASK_TYPE_READ_INITIATOR;
+ rx_io_size = io_req->data_xfer_len;
}
-
- /* Y Storm context */
- task_ctx->ystorm_st_context.expect_first_xfer = 0;
- task_ctx->ystorm_st_context.task_type =
- FCOE_TASK_TYPE_READ_INITIATOR;
-
- /* T Storm context */
- task_ctx->tstorm_st_context.read_only.task_type =
- FCOE_TASK_TYPE_READ_INITIATOR;
- mst_sgl_mode = GET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
- mst_sgl_mode);
}
+ /* Setup the fields for fcoe_task_params */
+ io_req->task_params->context = task_ctx;
+ io_req->task_params->sqe = sqe;
+ io_req->task_params->task_type = task_type;
+ io_req->task_params->tx_io_size = tx_io_size;
+ io_req->task_params->rx_io_size = rx_io_size;
+ io_req->task_params->conn_cid = fcport->fw_cid;
+ io_req->task_params->itid = io_req->xid;
+ io_req->task_params->cq_rss_number = cq_idx;
+ io_req->task_params->is_tape_device = fcport->dev_type;
+
+ /* Fill in information for scatter/gather list */
+ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
+ bd_count = bd_tbl->bd_valid;
+ io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
+ io_req->sgl_task_params->sgl_phys_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ io_req->sgl_task_params->sgl_phys_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ io_req->sgl_task_params->num_sges = bd_count;
+ io_req->sgl_task_params->total_buffer_size =
+ scsi_bufflen(io_req->sc_cmd);
+ io_req->sgl_task_params->small_mid_sge =
+ io_req->use_slowpath;
+ }
+
+ /* Fill in physical address of sense buffer */
+ sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
+ sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
+
/* fill FCP_CMND IU */
- fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
- qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+ qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
/* Swap fcp_cmnd since FC is big endian */
cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
-
for (i = 0; i < cnt; i++) {
- *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
- fcp_cmnd++;
+ tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
+ }
+ memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
+
+ init_initiator_rw_fcoe_task(io_req->task_params,
+ io_req->sgl_task_params,
+ sense_data_buffer_phys_addr,
+ io_req->task_retry_identifier, fcp_cmnd);
+
+ /* Increment SGL type counters */
+ if (bd_count == 1) {
+ qedf->single_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+ } else if (io_req->use_slowpath) {
+ qedf->slow_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ } else {
+ qedf->fast_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
}
-
- /* M Storm context - Sense buffer */
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
- U64_LO(io_req->sense_buffer_dma);
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
- U64_HI(io_req->sense_buffer_dma);
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
struct qedf_ctx *qedf = io_req->fcport->qedf;
struct fc_frame_header *fc_hdr;
- enum fcoe_task_type task_type = 0;
- union fcoe_data_desc_ctx *data_desc;
+ struct fcoe_tx_mid_path_params task_fc_hdr;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
- "for cmd_type = %d\n", io_req->cmd_type);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Initializing MP task for cmd_type=%d\n",
+ io_req->cmd_type);
qedf->control_requests++;
- /* Obtain task_type */
- if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
- (io_req->cmd_type == QEDF_ELS)) {
- task_type = FCOE_TASK_TYPE_MIDPATH;
- } else if (io_req->cmd_type == QEDF_ABTS) {
- task_type = FCOE_TASK_TYPE_ABTS;
- }
-
+ memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
io_req->task = task_ctx;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
- task_type);
-
- /* YSTORM only */
- {
- /* Initialize YSTORM task context */
- struct fcoe_tx_mid_path_params *task_fc_hdr =
- &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
- memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
- task_ctx->ystorm_st_context.task_rety_identifier =
- io_req->task_retry_identifier;
-
- /* Init SGL parameters */
- if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
- (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
- data_desc = &task_ctx->ystorm_st_context.data_desc;
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(mp_req->mp_req_bd_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(mp_req->mp_req_bd_dma);
- data_desc->slow.remainder_num_sges = 1;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- }
-
- fc_hdr = &(mp_req->req_fc_hdr);
- if (task_type == FCOE_TASK_TYPE_MIDPATH) {
- fc_hdr->fh_ox_id = io_req->xid;
- fc_hdr->fh_rx_id = htons(0xffff);
- } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
- fc_hdr->fh_rx_id = io_req->xid;
- }
+ /* Setup the fields for fcoe_task_params */
+ io_req->task_params->context = task_ctx;
+ io_req->task_params->sqe = sqe;
+ io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
+ io_req->task_params->tx_io_size = io_req->data_xfer_len;
+ /* rx_io_size tells the f/w how large a response buffer we have */
+ io_req->task_params->rx_io_size = PAGE_SIZE;
+ io_req->task_params->conn_cid = fcport->fw_cid;
+ io_req->task_params->itid = io_req->xid;
+ /* Return middle path commands on CQ 0 */
+ io_req->task_params->cq_rss_number = 0;
+ io_req->task_params->is_tape_device = fcport->dev_type;
+
+ fc_hdr = &(mp_req->req_fc_hdr);
+ /* Set OX_ID and RX_ID based on driver task id */
+ fc_hdr->fh_ox_id = io_req->xid;
+ fc_hdr->fh_rx_id = htons(0xffff);
+
+ /* Set up FC header information */
+ task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
+ task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
+ task_fc_hdr.type = fc_hdr->fh_type;
+ task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
+ task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
+ task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
+ task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
+
+ /* Set up s/g list parameters for request buffer */
+ tx_sgl_task_params.sgl = mp_req->mp_req_bd;
+ tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
+ tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
+ tx_sgl_task_params.num_sges = 1;
+ /* Total Tx buffer size is the transfer length for this request */
+ tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
+ tx_sgl_task_params.small_mid_sge = 0;
+
+ /* Set up s/g list parameters for response buffer */
+ rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
+ rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
+ rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
+ rx_sgl_task_params.num_sges = 1;
+ /* Response buffer is a single page, so use PAGE_SIZE for now */
+ rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
+ rx_sgl_task_params.small_mid_sge = 0;
- /* Fill FC Header into middle path buffer */
- task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
- task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
- task_fc_hdr->type = fc_hdr->fh_type;
- task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
- task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
- task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
- task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
-
- task_ctx->ystorm_st_context.data_2_trns_rem =
- io_req->data_xfer_len;
- task_ctx->ystorm_st_context.task_type = task_type;
- }
-
- /* TSTORM ONLY */
- {
- task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
- task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
- /* Always send middle-path repsonses on CQ #0 */
- task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
- io_req->fp_idx = 0;
- SET_FIELD(task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
- PROTOCOLID_FCOE);
- task_ctx->tstorm_st_context.read_only.task_type = task_type;
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
- 1);
- task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
- }
-
- /* MSTORM only */
- {
- if (task_type == FCOE_TASK_TYPE_MIDPATH) {
- /* Initialize task context */
- data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-
- /* Set cache sges address and length */
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(mp_req->mp_resp_bd_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(mp_req->mp_resp_bd_dma);
- data_desc->slow.remainder_num_sges = 1;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- /*
- * Also need to fil in non-fastpath response address
- * for middle path commands.
- */
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
- U64_LO(mp_req->mp_resp_bd_dma);
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
- U64_HI(mp_req->mp_resp_bd_dma);
- }
- }
-
- /* USTORM ONLY */
- {
- task_ctx->ustorm_ag_context.global_cq_num = 0;
- }
+ /*
+ * Last arg is 0 because the previous code never requested that the FW
+ * place the FC header.
+ */
+ init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
+ &task_fc_hdr,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params, 0);
- /* I/O stats. Middle path commands always use slow SGEs */
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ /* Midpath requests always consume 1 SGE */
+ qedf->single_sge_ios++;
}
-void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
- enum fcoe_task_type req_type, u32 offset)
+/* Assumes fcport->rport_lock is held */
+u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
- struct fcoe_wqe *sqe;
uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
+ u16 rval;
- sqe = &fcport->sq[fcport->sq_prod_idx];
+ rval = fcport->sq_prod_idx;
+ /* Adjust ring index */
fcport->sq_prod_idx++;
fcport->fw_sq_prod_idx++;
if (fcport->sq_prod_idx == total_sqe)
fcport->sq_prod_idx = 0;
- switch (req_type) {
- case FCOE_TASK_TYPE_WRITE_INITIATOR:
- case FCOE_TASK_TYPE_READ_INITIATOR:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
- if (ptu_invalidate)
- SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
- break;
- case FCOE_TASK_TYPE_MIDPATH:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
- break;
- case FCOE_TASK_TYPE_ABTS:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- SEND_FCOE_ABTS_REQUEST);
- break;
- case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- FCOE_EXCHANGE_CLEANUP);
- break;
- case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- FCOE_SEQUENCE_RECOVERY);
- /* NOTE: offset param only used for sequence recovery */
- sqe->additional_info_union.seq_rec_updated_offset = offset;
- break;
- case FCOE_TASK_TYPE_UNSOLICITED:
- break;
- default:
- break;
- }
-
- sqe->task_id = xid;
-
- /* Make sure SQ data is coherent */
- wmb();
-
+ return rval;
}
void qedf_ring_doorbell(struct qedf_rport *fcport)
@@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
- u32 ptu_invalidate = 0;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
/* Initialize rest of io_req fileds */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EAGAIN;
}
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ }
+
+ /* Obtain free SQE */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
/* Get the task context */
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
if (!task_ctx) {
@@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
-
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
- kref_put(&io_req->refcount, qedf_release_cmd);
- }
-
- /* Obtain free SQ entry */
- qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
+ qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
/* Ring doorbell */
qedf_ring_doorbell(fcport);
@@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
u32 r_a_tov = 0;
int rc = 0;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
r_a_tov = rdata->r_a_tov;
lport = qedf->lport;
@@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
spin_lock_irqsave(&fcport->rport_lock, flags);
- /* Add ABTS to send queue */
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ io_req->task_params->sqe = sqe;
- /* Ring doorbell */
+ init_initiator_abort_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
@@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req;
- struct fcoe_sge *mp_req_bd;
- struct fcoe_sge *mp_resp_bd;
+ struct scsi_sge *mp_req_bd;
+ struct scsi_sge *mp_resp_bd;
struct qedf_ctx *qedf = io_req->fcport->qedf;
dma_addr_t addr;
uint64_t sz;
@@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
}
/* Allocate and map mp_req_bd and mp_resp_bd */
- sz = sizeof(struct fcoe_sge);
+ sz = sizeof(struct scsi_sge);
mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
&mp_req->mp_req_bd_dma, GFP_KERNEL);
if (!mp_req->mp_req_bd) {
@@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->sge_addr.lo = U64_LO(addr);
mp_req_bd->sge_addr.hi = U64_HI(addr);
- mp_req_bd->size = QEDF_PAGE_SIZE;
+ mp_req_bd->sge_len = QEDF_PAGE_SIZE;
/*
* MP buffer is either a task mgmt command or an ELS.
@@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->sge_addr.lo = U64_LO(addr);
mp_resp_bd->sge_addr.hi = U64_HI(addr);
- mp_resp_bd->size = QEDF_PAGE_SIZE;
+ mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
return 0;
}
@@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
fcport = io_req->fcport;
if (!fcport) {
@@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
init_completion(&io_req->tm_done);
- /* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
- /* Ring doorbell */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ io_req->task_params->sqe = sqe;
+
+ init_initiator_cleanup_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct qedf_mp_req *tm_req;
struct fcoe_task_context *task;
- struct fc_frame_header *fc_hdr;
- struct fcp_cmnd *fcp_cmnd;
struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_lport *lport = qedf->lport;
int rc = 0;
uint16_t xid;
- uint32_t sid, did;
int tmo = 0;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
if (!sc_cmd) {
QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Set the return CPU to be the same as the request one */
io_req->cpu = smp_processor_id();
- tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
-
- rc = qedf_init_mp_req(io_req);
- if (rc == FAILED) {
- QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
- "failed\n");
- kref_put(&io_req->refcount, qedf_release_cmd);
- goto reset_tmf_err;
- }
-
/* Set TM flags */
- io_req->io_req_flags = 0;
- tm_req->tm_flags = tm_flags;
+ io_req->io_req_flags = QEDF_READ;
+ io_req->data_xfer_len = 0;
+ io_req->tm_flags = tm_flags;
/* Default is to return a SCSI command when an error occurs */
io_req->return_scsi_cmd_on_abts = true;
- /* Fill FCP_CMND */
- qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
- fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
- memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
- fcp_cmnd->fc_dl = 0;
-
- /* Fill FC header */
- fc_hdr = &(tm_req->req_fc_hdr);
- sid = fcport->sid;
- did = fcport->rdata->ids.port_id;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
- FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = io_req->xid;
@@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
- qedf_init_mp_task(io_req, task);
init_completion(&io_req->tm_done);
- /* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
- /* Ring doorbell */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
+ qedf_init_task(fcport, lport, io_req, task, sqe);
qedf_ring_doorbell(fcport);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
struct fcoe_cqe_rsp_info *fcp_rsp;
- struct fcoe_cqe_midpath_info *mp_info;
-
-
- /* Get TMF response length from CQE */
- mp_info = &cqe->cqe_info.midpath_info;
- io_req->mp_req.resp_len = mp_info->data_placement_size;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
- "Response len is %d.\n", io_req->mp_req.resp_len);
fcp_rsp = &cqe->cqe_info.rsp_info;
qedf_parse_fcp_rsp(io_req, fcp_rsp);
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
index 2b3e16b24299..90a6925577cc 100644
--- a/drivers/scsi/qedi/Makefile
+++ b/drivers/scsi/qedi/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDI) := qedi.o
qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
- qedi_dbg.o
+ qedi_dbg.o qedi_fw_api.o
qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 2bce3efc66a4..d6978cbc56f0 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,6 +14,8 @@
#include "qedi.h"
#include "qedi_iscsi.h"
#include "qedi_gbl.h"
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_task *mtask);
@@ -53,8 +55,8 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
- resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
- resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
@@ -975,81 +977,6 @@ exit_fp_process:
return;
}
-static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
- u16 tid, uint16_t ptu_invalidate, int is_cleanup)
-{
- struct iscsi_wqe *wqe;
- struct iscsi_wqe_field *cont_field;
- struct qedi_endpoint *ep;
- struct scsi_cmnd *sc = task->sc;
- struct iscsi_login_req *login_hdr;
- struct qedi_cmd *cmd = task->dd_data;
-
- login_hdr = (struct iscsi_login_req *)task->hdr;
- ep = qedi_conn->ep;
- wqe = &ep->sq[ep->sq_prod_idx];
-
- memset(wqe, 0, sizeof(*wqe));
-
- ep->sq_prod_idx++;
- ep->fw_sq_prod_idx++;
- if (ep->sq_prod_idx == QEDI_SQ_SIZE)
- ep->sq_prod_idx = 0;
-
- if (is_cleanup) {
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_TASK_CLEANUP);
- wqe->task_id = tid;
- return;
- }
-
- if (ptu_invalidate) {
- SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
- ISCSI_WQE_SET_PTU_INVALIDATE);
- }
-
- cont_field = &wqe->cont_prevtid_union.cont_field;
-
- switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
- case ISCSI_OP_LOGIN:
- case ISCSI_OP_TEXT:
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_MIDDLE_PATH);
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
- 1);
- cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
- break;
- case ISCSI_OP_LOGOUT:
- case ISCSI_OP_NOOP_OUT:
- case ISCSI_OP_SCSI_TMFUNC:
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_NORMAL);
- break;
- default:
- if (!sc)
- break;
-
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_NORMAL);
- cont_field->contlen_cdbsize_field =
- (sc->sc_data_direction == DMA_TO_DEVICE) ?
- scsi_bufflen(sc) : 0;
- if (cmd->use_slowpath)
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
- else
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
- (sc->sc_data_direction ==
- DMA_TO_DEVICE) ?
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid) : 0);
- break;
- }
-
- wqe->task_id = tid;
- /* Make sure SQ data is coherent */
- wmb();
-}
-
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
struct iscsi_db_data dbell = { 0 };
@@ -1076,96 +1003,116 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
qedi_conn->iscsi_conn_id);
}
+static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
+{
+ struct qedi_endpoint *ep;
+ u16 rval;
+
+ ep = qedi_conn->ep;
+ rval = ep->sq_prod_idx;
+
+ /* Increment SQ index */
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ return rval;
+}
+
int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_login_req_hdr login_req_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
- struct iscsi_login_req_hdr *fw_login_req = NULL;
- struct iscsi_cached_sge_ctx *cached_sge = NULL;
- struct iscsi_sge *single_sge = NULL;
- struct iscsi_sge *req_sge = NULL;
- struct iscsi_sge *resp_sge = NULL;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
- s16 ptu_invalidate = 0;
+ struct qedi_endpoint *ep;
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ ep = qedi_conn->ep;
login_hdr = (struct iscsi_login_req *)task->hdr;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
- fw_login_req->opcode = login_hdr->opcode;
- fw_login_req->version_min = login_hdr->min_version;
- fw_login_req->version_max = login_hdr->max_version;
- fw_login_req->flags_attr = login_hdr->flags;
- fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
- fw_login_req->isid_d = *((u32 *)login_hdr->isid);
- fw_login_req->tsih = login_hdr->tsih;
- qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_login_req->cid = qedi_conn->iscsi_conn_id;
- fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
- fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
- fw_login_req->exp_stat_sn = 0;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+ /* Update header info */
+ login_req_pdu_header.opcode = login_hdr->opcode;
+ login_req_pdu_header.version_min = login_hdr->min_version;
+ login_req_pdu_header.version_max = login_hdr->max_version;
+ login_req_pdu_header.flags_attr = login_hdr->flags;
+ login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
+ login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
+
+ login_req_pdu_header.tsih = login_hdr->tsih;
+ login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
- (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
- /* Mstorm context */
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = 0x2;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
- ntoh24(login_hdr->dlength);
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
- fw_task_ctx->ustorm_st_context.task_type = 0x2;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- ntoh24(login_hdr->dlength);
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
+ login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_req_pdu_header.exp_stat_sn = 0;
+
+ /* Fill tx AHS and rx buffer */
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = ntoh24(login_hdr->dlength);
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_login_request_task(&task_params,
+ &login_req_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1173,7 +1120,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1181,65 +1127,64 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_logout_req_hdr *fw_logout_req = NULL;
- struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout_req_hdr logout_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
- struct qedi_cmd *qedi_cmd = NULL;
- s16 tid = 0;
- s16 ptu_invalidate = 0;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
+ s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
logout_hdr = (struct iscsi_logout *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
-
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
- fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
- fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
- qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
- fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- fw_logout_req->cid = qedi_conn->iscsi_conn_id;
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ /* Update header info */
+ logout_pdu_header.opcode = logout_hdr->opcode;
+ logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+ rval = init_initiator_logout_request_task(&task_params,
+ &logout_pdu_header,
+ NULL, NULL);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1247,9 +1192,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
-
return 0;
}
@@ -1533,47 +1476,46 @@ ldel_exit:
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_task *mtask)
{
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_tmf_request_hdr tmf_pdu_header;
+ struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_tmf_request_hdr *fw_tmf_request;
- struct iscsi_sge *single_sge;
- struct qedi_cmd *qedi_cmd;
- struct qedi_cmd *cmd;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- u32 lun[2];
- s16 tid = 0, ptu_invalidate = 0;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
+ s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
- qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ ep = qedi_conn->ep;
- tid = qedi_cmd->task_id;
- qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
- fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
- fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
- fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+ qedi_cmd->task_id = tid;
- memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
- fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
- fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
+ /* Update header info */
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+ tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+ memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_ABORT_TASK) {
@@ -1584,53 +1526,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return 0;
}
cmd = (struct qedi_cmd *)ctask->dd_data;
- fw_tmf_request->rtt =
+ tmf_pdu_header.rtt =
qedi_set_itt(cmd->task_id,
get_itt(tmf_hdr->rtt));
} else {
- fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
}
- fw_tmf_request->opcode = tmf_hdr->opcode;
- fw_tmf_request->function = tmf_hdr->flags;
- fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
- fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
-
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+ tmf_pdu_header.opcode = tmf_hdr->opcode;
+ tmf_pdu_header.function = tmf_hdr->flags;
+ tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
- tid, mtask->itt, qedi_conn->iscsi_conn_id);
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_tmf_request_task(&task_params,
+ &tmf_pdu_header);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1638,7 +1561,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
return 0;
}
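Several of these builders (TMF, NOP-Out and the SCSI command path below) copy the 8-byte SCSI LUN into two 32-bit words and byte-swap them from big-endian before writing lun.lo/lun.hi into the firmware PDU header. A stand-alone sketch of that conversion, using ntohl() as a stand-in for be32_to_cpu() and a simplified fw_lun pair in place of the HSI field:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl(), standing in for be32_to_cpu() */

/* Simplified stand-in for the firmware header's lun.lo/lun.hi pair. */
struct fw_lun {
	unsigned int lo;
	unsigned int hi;
};

int main(void)
{
	/* Single-level LUN 1 in wire (big-endian) byte order. */
	unsigned char wire_lun[8] = { 0x00, 0x01, 0, 0, 0, 0, 0, 0 };
	unsigned int words[2];
	struct fw_lun lun;

	memcpy(words, wire_lun, sizeof(words));	/* memcpy(scsi_lun, &hdr->lun, 8) */
	lun.lo = ntohl(words[0]);		/* be32_to_cpu(scsi_lun[0]) */
	lun.hi = ntohl(words[1]);		/* be32_to_cpu(scsi_lun[1]) */

	printf("lun.lo=0x%08x lun.hi=0x%08x\n", lun.lo, lun.hi);
	return 0;
}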
@@ -1689,101 +1611,98 @@ int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_text_request_hdr text_request_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_text_request_hdr *fw_text_request;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_sge *single_sge;
- struct qedi_cmd *qedi_cmd;
- /* For 6.5 hdr iscsi_hdr */
+ struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- s16 ptu_invalidate = 0;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
text_hdr = (struct iscsi_text *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_text_request =
- &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
- fw_text_request->opcode = text_hdr->opcode;
- fw_text_request->flags_attr = text_hdr->flags;
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+ /* Update header info */
+ text_request_pdu_header.opcode = text_hdr->opcode;
+ text_request_pdu_header.flags_attr = text_hdr->flags;
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_text_request->ttt = text_hdr->ttt;
- fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
- fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
- fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
-
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
+ text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ text_request_pdu_header.ttt = text_hdr->ttt;
+ text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ /* Fill tx AHS and rx buffer */
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = ntoh24(text_hdr->dlength);
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_text_request_task(&task_params,
+ &text_request_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
- /* Mstorm context */
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = 0x2;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- ntoh24(text_hdr->dlength);
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
- ntoh24(text_hdr->dlength);
- fw_task_ctx->ustorm_st_context.exp_data_sn =
- be32_to_cpu(text_hdr->exp_statsn);
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
- fw_task_ctx->ustorm_st_context.task_type = 0x2;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
- /* Add command in active command list */
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
qedi_cmd->io_cmd_in_list = true;
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
-
return 0;
}
@@ -1791,58 +1710,62 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct iscsi_task *task,
char *datap, int data_len, int unsol)
{
+ struct iscsi_nop_out_hdr nop_out_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_nop_out_hdr *fw_nop_out;
- struct qedi_cmd *qedi_cmd;
- /* For 6.5 hdr iscsi_hdr */
struct iscsi_nopout *nopout_hdr;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_sge *single_sge;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- u32 lun[2];
- s16 ptu_invalidate = 0;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ if (tid == -1)
return -ENOMEM;
- }
-
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
- SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
- SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+ /* Update header info */
+ nop_out_pdu_header.opcode = nopout_hdr->opcode;
+ SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
- memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
- fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
- fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+ memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
+ nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
- fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
- fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
- fw_task_ctx->ystorm_st_context.state.local_comp = 1;
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
+ nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
} else {
- fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1851,53 +1774,46 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
spin_unlock(&qedi_conn->list_lock);
}
- fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
- fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
- fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
-
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
- (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ /* Fill tx AHS and rx buffer */
+ if (data_len) {
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = data_len;
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+ }
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = data_len;
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_nop_out_task(&task_params,
+ &nop_out_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
+
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1905,7 +1821,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
int bd_index)
{
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@@ -1938,7 +1854,7 @@ static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
struct scsi_cmnd *sc = cmd->scsi_cmd;
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
struct scatterlist *sg;
int byte_count = 0;
int bd_count = 0;
@@ -2040,7 +1956,7 @@ static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
if (bd_count == 0)
return;
} else {
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
bd[0].sge_addr.lo = 0;
bd[0].sge_addr.hi = 0;
@@ -2136,244 +2052,182 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ struct iscsi_cmd_hdr cmd_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct scsi_sgl_task_params *prx_sgl = NULL;
+ struct scsi_sgl_task_params *ptx_sgl = NULL;
+ struct iscsi_task_params task_params;
+ struct iscsi_conn_params conn_params;
+ struct scsi_initiator_cmd_params cmd_params;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_phys_sgl_ctx *phys_sgl;
- struct iscsi_virt_sgl_ctx *virt_sgl;
- struct ystorm_iscsi_task_st_ctx *yst_cxt;
- struct mstorm_iscsi_task_st_ctx *mst_cxt;
- struct iscsi_sgl *sgl_struct;
- struct iscsi_sge *single_sge;
+ struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
- enum iscsi_task_type task_type;
- struct iscsi_cmd_hdr *fw_cmd;
- u32 lun[2];
- u32 exp_data;
- u16 cq_idx = smp_processor_id() % qedi->num_queues;
- s16 ptu_invalidate = 0;
+ enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
s16 tid = 0;
- u8 num_fast_sgs;
+ u16 sq_idx = 0;
+ u16 cq_idx;
+ int rval = 0;
- tid = qedi_get_task_idx(qedi);
- if (tid == -1)
- return -ENOMEM;
+ ep = qedi_conn->ep;
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
qedi_iscsi_map_sg_list(cmd);
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
- cmd->task_id = tid;
- /* Ystorm context */
- fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+ cmd->task_id = tid;
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+ memset(&conn_params, 0, sizeof(conn_params));
+ memset(&cmd_params, 0, sizeof(cmd_params));
+
+ cq_idx = smp_processor_id() % qedi->num_queues;
+ /* Update header info */
+ SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
+ ISCSI_ATTR_SIMPLE);
if (sc->sc_data_direction == DMA_TO_DEVICE) {
- if (conn->session->initial_r2t_en) {
- exp_data = min((conn->session->imm_data_en *
- conn->max_xmit_dlength),
- conn->session->first_burst);
- exp_data = min(exp_data, scsi_bufflen(sc));
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- cpu_to_le32(exp_data);
- } else {
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- min(conn->session->first_burst, scsi_bufflen(sc));
- }
-
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_WRITE, 1);
task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
} else {
- if (scsi_bufflen(sc))
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_READ, 1);
task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
}
- fw_cmd->lun.lo = be32_to_cpu(lun[0]);
- fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+ cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
qedi_update_itt_map(qedi, tid, task->itt, cmd);
- fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_cmd->expected_transfer_length = scsi_bufflen(sc);
- fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
- fw_cmd->opcode = hdr->opcode;
- qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
- fw_task_ctx->mstorm_st_context.sense_db.hi =
- (u32)((u64)cmd->sense_buffer_dma >> 32);
- fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
- fw_task_ctx->mstorm_st_context.task_type = task_type;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
- fw_task_ctx->ustorm_st_context.exp_data_sn =
- be32_to_cpu(hdr->exp_statsn);
- fw_task_ctx->ustorm_st_context.task_type = task_type;
- fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-
- num_fast_sgs = (cmd->io_tbl.sge_valid ?
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid) : 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
-
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
- cmd->io_tbl.sge_valid);
-
- yst_cxt = &fw_task_ctx->ystorm_st_context;
- mst_cxt = &fw_task_ctx->mstorm_st_context;
- /* Tx path */
+ cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
+ cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
+ cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd_pdu_header.opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
+
+ /* Fill tx AHS and rx buffer */
if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
- /* not considering superIO or FastIO */
- if (cmd->io_tbl.sge_valid == 1) {
- cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
- cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
- cached_sge->sge.sge_len = bd[0].sge_len;
- qedi->cached_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
- phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
- phys_sgl->sgl_base.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
- qedi->slow_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES,
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid));
- virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
- virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
- virt_sgl->sgl_base.hi =
+ tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ tx_sgl_task_params.sgl_phys_addr.hi =
(u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- virt_sgl->sgl_initial_offset =
- (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
- qedi->fast_sgls++;
- }
- fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
- fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
- } else {
- /* Rx path */
- if (cmd->io_tbl.sge_valid == 1) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- single_sge = &mst_cxt->sgl_union.single_sge;
- single_sge->sge_addr.lo = bd[0].sge_addr.lo;
- single_sge->sge_addr.hi = bd[0].sge_addr.hi;
- single_sge->sge_len = bd[0].sge_len;
- qedi->cached_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
- sgl_struct = &mst_cxt->sgl_union.sgl_struct;
- sgl_struct->sgl_addr.lo =
- (u32)(cmd->io_tbl.sge_tbl_dma);
- sgl_struct->sgl_addr.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- sgl_struct->updated_sge_size = 0;
- sgl_struct->updated_sge_offset = 0;
- qedi->slow_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
- sgl_struct = &mst_cxt->sgl_union.sgl_struct;
- sgl_struct->sgl_addr.lo =
- (u32)(cmd->io_tbl.sge_tbl_dma);
- sgl_struct->sgl_addr.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- sgl_struct->byte_offset =
- (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- sgl_struct->updated_sge_size = 0;
- sgl_struct->updated_sge_offset = 0;
- qedi->fast_sgls++;
- }
- fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
- fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
- }
-
- if (cmd->io_tbl.sge_valid == 1)
- /* Singel-SGL */
- qedi->use_cached_sge = true;
- else {
+ tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+ tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
if (cmd->use_slowpath)
- qedi->use_slow_sge = true;
- else
- qedi->use_fast_sge = true;
- }
+ tx_sgl_task_params.small_mid_sge = true;
+ } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+ rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+ rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
+ }
+
+ /* Add conn param */
+ conn_params.first_burst_length = conn->session->first_burst;
+ conn_params.max_send_pdu_length = conn->max_xmit_dlength;
+ conn_params.max_burst_length = conn->session->max_burst;
+ if (conn->session->initial_r2t_en)
+ conn_params.initial_r2t = true;
+ if (conn->session->imm_data_en)
+ conn_params.immediate_data = true;
+
+ /* Add cmd params */
+ cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
+ cmd_params.sense_data_buffer_phys_addr.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = cq_idx;
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
+ task_params.tx_io_size = scsi_bufflen(sc);
+ else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
+ task_params.rx_io_size = scsi_bufflen(sc);
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
- "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
(task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
"Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
"Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
- (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
+ (u32)(cmd->io_tbl.sge_tbl_dma),
(u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
- /* Add command in active command list */
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+ if (task_params.tx_io_size != 0)
+ ptx_sgl = &tx_sgl_task_params;
+ if (task_params.rx_io_size != 0)
+ prx_sgl = &rx_sgl_task_params;
+
+ rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
+ &cmd_params, &cmd_pdu_header,
+ ptx_sgl, prx_sgl,
+ NULL);
+ if (rval)
+ return -1;
+
spin_lock(&qedi_conn->list_lock);
list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
cmd->io_cmd_in_list = true;
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
- if (qedi_io_tracing)
- qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
-
return 0;
}
int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
{
+ struct iscsi_task_params task_params;
+ struct qedi_endpoint *ep;
struct iscsi_conn *conn = task->conn;
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
- s16 ptu_invalidate = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
cmd->task_id, get_itt(task->itt), task->state,
cmd->state, qedi_conn->iscsi_conn_id);
- qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
- qedi_ring_doorbell(qedi_conn);
+ memset(&task_params, 0, sizeof(task_params));
+ ep = qedi_conn->ep;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+
+ task_params.sqe = &ep->sq[sq_idx];
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ task_params.itid = cmd->task_id;
+ rval = init_cleanup_task(&task_params);
+ if (rval)
+ return rval;
+
+ qedi_ring_doorbell(qedi_conn);
return 0;
}
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644
index 000000000000..fd354d4e03eb
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -0,0 +1,781 @@
+/* QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+static
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params)
+{
+ u8 sge_index;
+ u8 num_sges;
+ u32 val;
+
+ num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
+ SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
+
+ /* sgl params */
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->total_buffer_size);
+ ctx_sgl_params->sgl_total_length = val;
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges; sge_index++) {
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+ ctx_data_desc->sge[sge_index].sge_len = val;
+ }
+}
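init_scsi_sgl_context() always records the SGL bus address, total length and SGE count, but mirrors at most SCSI_NUM_SGES_IN_CACHE entries into the cached descriptor area. A minimal user-space sketch of that capping, with a simplified stand-in for struct scsi_sge:

#include <stdio.h>

#define SCSI_NUM_SGES_IN_CACHE 4	/* same value as the constant above */

/* Simplified stand-in for struct scsi_sge; not the qed HSI layout. */
struct sge {
	unsigned long long addr;
	unsigned int len;
};

/* How many SGEs init_scsi_sgl_context() mirrors into the cached area. */
static unsigned int cached_sge_count(unsigned int num_sges)
{
	return num_sges > SCSI_NUM_SGES_IN_CACHE ?
	       SCSI_NUM_SGES_IN_CACHE : num_sges;
}

int main(void)
{
	struct sge sgl[6] = {
		{ 0x1000, 4096 }, { 0x2000, 4096 }, { 0x3000, 4096 },
		{ 0x4000, 4096 }, { 0x5000, 4096 }, { 0x6000, 2048 },
	};
	unsigned int i, cached = cached_sge_count(6);

	/* Only the first four entries are copied into the context cache... */
	for (i = 0; i < cached; i++)
		printf("cached sge[%u]: addr=0x%llx len=%u\n",
		       i, sgl[i].addr, sgl[i].len);
	/* ...the full list stays in the DMA'd table at sgl_phys_addr. */
	return 0;
}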
+
+static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
+ enum iscsi_task_type task_type,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ u32 io_size;
+
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+ task_type == ISCSI_TASK_TYPE_TARGET_READ)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (!io_size)
+ return 0;
+
+ if (!dif_task_params)
+ return io_size;
+
+ return !dif_task_params->dif_on_network ?
+ io_size : sgl_task_params->total_buffer_size;
+}
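calc_rw_task_size() takes the transmit size for write-type tasks and the receive size for read-type tasks, and substitutes the SGL's total_buffer_size only when DIF protection travels on the wire, since the buffer then also carries the protection intervals. A small stand-alone sketch of that decision, with plain values in place of the parameter structs:

#include <stdio.h>
#include <stdbool.h>

/*
 * Simplified mirror of calc_rw_task_size(): 'is_tx' stands in for the
 * initiator-write / target-read task types, the remaining arguments for
 * fields of iscsi_task_params, scsi_sgl_task_params and
 * scsi_dif_task_params.
 */
static unsigned int rw_task_size(bool is_tx, unsigned int tx_io_size,
				 unsigned int rx_io_size, bool has_dif,
				 bool dif_on_network, unsigned int sgl_total)
{
	unsigned int io_size = is_tx ? tx_io_size : rx_io_size;

	if (!io_size)
		return 0;
	if (!has_dif)
		return io_size;
	return dif_on_network ? sgl_total : io_size;
}

int main(void)
{
	/* 64 KiB write; 8-byte PI per 512-byte block adds 1 KiB on the wire. */
	printf("plain write : %u\n", rw_task_size(true, 65536, 0, false, false, 0));
	printf("DIF on wire : %u\n", rw_task_size(true, 65536, 0, true, true, 66560));
	return 0;
}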
+
+static void
+init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ if (!dif_task_params)
+ return;
+
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
+ dif_task_params->dif_block_size_log);
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
+ dif_task_params->dif_on_network ? 1 : 0);
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+}
+
+static void init_sqe(struct iscsi_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_dif_task_params *dif_task_params,
+ struct iscsi_common_hdr *pdu_header,
+ struct scsi_initiator_cmd_params *cmd_params,
+ enum iscsi_task_type task_type,
+ bool is_cleanup)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+ if (is_cleanup) {
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ return;
+ }
+
+ switch (task_type) {
+ case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+ {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ init_dif_context_flags(&task_params->sqe->prot_flags,
+ dif_task_params);
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+
+ if (task_params->tx_io_size) {
+ buf_size = calc_rw_task_size(task_params, task_type,
+ sgl_task_params,
+ dif_task_params);
+
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+ buf_size);
+
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
+ }
+ break;
+ case ISCSI_TASK_TYPE_INITIATOR_READ:
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
+ break;
+ case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
+ case ISCSI_TASK_TYPE_MIDPATH:
+ {
+ bool advance_statsn = true;
+
+ if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_LOGIN);
+ else
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
+ u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
+ ISCSI_COMMON_HDR_OPCODE);
+
+ if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
+ (opcode != ISCSI_OPCODE_NOP_IN ||
+ pdu_header->itt == ISCSI_TTT_ALL_ONES))
+ advance_statsn = false;
+ }
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
+ advance_statsn ? 1 : 0);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
+
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ ISCSI_WQE_NUM_SGES_SLOWIO);
+ else
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
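For data-bearing WQEs, init_sqe() clamps the SGE count to SCSI_NUM_SGES_SLOW_SGL_THR unless scsi_is_slow_sgl() flags the list as slow (many SGEs with small middle elements), in which case ISCSI_WQE_NUM_SGES_SLOWIO is written instead. A stand-alone sketch of that selection; the threshold and marker are taken as parameters here because their actual values live in the qed/HSI headers:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors scsi_is_slow_sgl() above; the threshold is a parameter here. */
static bool slow_sgl(unsigned int num_sges, bool small_mid_sge,
		     unsigned int slow_thr)
{
	return num_sges > slow_thr && small_mid_sge;
}

/*
 * Value placed in the WQE NUM_SGES field: the slow-I/O marker when the
 * SGL is "slow", otherwise the SGE count clamped to the threshold.
 * 'slow_thr' stands in for SCSI_NUM_SGES_SLOW_SGL_THR and
 * 'slowio_marker' for ISCSI_WQE_NUM_SGES_SLOWIO.
 */
static unsigned int wqe_num_sges(unsigned int num_sges, bool small_mid_sge,
				 unsigned int slow_thr,
				 unsigned int slowio_marker)
{
	if (slow_sgl(num_sges, small_mid_sge, slow_thr))
		return slowio_marker;
	return num_sges < slow_thr ? num_sges : slow_thr;
}

int main(void)
{
	/* Threshold/marker values below are purely illustrative. */
	printf("3 SGEs, large elems   -> %u\n", wqe_num_sges(3, false, 8, 0xf));
	printf("12 SGEs, large elems  -> %u\n", wqe_num_sges(12, false, 8, 0xf));
	printf("12 SGEs, small middle -> %u\n", wqe_num_sges(12, true, 8, 0xf));
	return 0;
}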
+
+static void init_default_iscsi_task(struct iscsi_task_params *task_params,
+ struct data_hdr *pdu_header,
+ enum iscsi_task_type task_type)
+{
+ struct iscsi_task_context *context;
+ u16 index;
+ u32 val;
+
+ context = task_params->context;
+ memset(context, 0, sizeof(*context));
+
+ for (index = 0; index <
+ ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
+ index++) {
+ val = cpu_to_le32(pdu_header->data[index]);
+ context->ystorm_st_context.pdu_hdr.data.data[index] = val;
+ }
+
+ context->mstorm_st_context.task_type = task_type;
+ context->mstorm_ag_context.task_cid =
+ cpu_to_le16(task_params->conn_icid);
+
+ SET_FIELD(context->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ context->ustorm_st_context.task_type = task_type;
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
+static
+void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
+ struct scsi_initiator_cmd_params *cmd)
+{
+ union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
+ u32 val;
+
+ if (!cmd->extended_cdb_sge.sge_len)
+ return;
+
+ SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
+ ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
+ cmd->extended_cdb_sge.sge_len);
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
+}
+
+static
+void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len,
+ u8 num_sges, bool tx_dif_conn_err_en)
+{
+ u32 val;
+
+ ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ val = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_cxt->exp_data_transfer_len = val;
+ SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_cxt->flags2,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
+static
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+ struct iscsi_conn_params *conn_params,
+ enum iscsi_task_type task_type,
+ u32 task_size,
+ u32 exp_data_transfer_len,
+ u8 total_ahs_length)
+{
+ u32 max_unsolicited_data = 0, val;
+
+ if (total_ahs_length &&
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
+ SET_FIELD(context->ustorm_st_context.flags2,
+ USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
+
+ switch (task_type) {
+ case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+ if (!conn_params->initial_r2t)
+ max_unsolicited_data = conn_params->first_burst_length;
+ else if (conn_params->immediate_data)
+ max_unsolicited_data =
+ min(conn_params->first_burst_length,
+ conn_params->max_send_pdu_length);
+
+ context->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(total_ahs_length == 0 ?
+ min(exp_data_transfer_len,
+ max_unsolicited_data) :
+ ((u32)(total_ahs_length +
+ ISCSI_AHS_CNTL_SIZE)));
+ break;
+ case ISCSI_TASK_TYPE_TARGET_READ:
+ val = cpu_to_le32(exp_data_transfer_len);
+ context->ustorm_ag_context.exp_data_acked = val;
+ break;
+ case ISCSI_TASK_TYPE_INITIATOR_READ:
+ context->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32((total_ahs_length == 0 ? 0 :
+ total_ahs_length +
+ ISCSI_AHS_CNTL_SIZE));
+ break;
+ case ISCSI_TASK_TYPE_TARGET_WRITE:
+ val = cpu_to_le32(task_size);
+ context->ustorm_ag_context.exp_cont_len = val;
+ break;
+ default:
+ break;
+ }
+}
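For an initiator write with no AHS, exp_data_acked ends up as the unsolicited-data budget: FirstBurstLength when InitialR2T is off, min(FirstBurstLength, MaxSendPDULength) when only immediate data is allowed, and zero otherwise, further capped by the expected transfer length. When AHS is present the field instead carries the AHS length plus ISCSI_AHS_CNTL_SIZE. A stand-alone sketch of the unsolicited-data part, mirroring the switch above with plain arguments:

#include <stdio.h>
#include <stdbool.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * Mirrors the ISCSI_TASK_TYPE_INITIATOR_WRITE branch above for the
 * no-AHS case: how much write data may be sent before the first R2T.
 */
static unsigned int unsolicited_exp_data_acked(bool initial_r2t,
					       bool immediate_data,
					       unsigned int first_burst,
					       unsigned int max_send_pdu,
					       unsigned int exp_xfer_len)
{
	unsigned int max_unsolicited = 0;

	if (!initial_r2t)
		max_unsolicited = first_burst;
	else if (immediate_data)
		max_unsolicited = min_u32(first_burst, max_send_pdu);

	return min_u32(exp_xfer_len, max_unsolicited);
}

int main(void)
{
	/* Hypothetical negotiated values: FirstBurst=64K, MaxSendPDU=8K, 128K I/O. */
	printf("InitialR2T=No           : %u\n",
	       unsolicited_exp_data_acked(false, true, 65536, 8192, 131072));
	printf("InitialR2T=Yes, ImmData : %u\n",
	       unsolicited_exp_data_acked(true, true, 65536, 8192, 131072));
	printf("InitialR2T=Yes, no Imm  : %u\n",
	       unsolicited_exp_data_acked(true, false, 65536, 8192, 131072));
	return 0;
}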
+
+static
+void init_rtdif_task_context(struct rdif_task_context *rdif_context,
+ struct tdif_task_context *tdif_context,
+ struct scsi_dif_task_params *dif_task_params,
+ enum iscsi_task_type task_type)
+{
+ u32 val;
+
+ if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
+ return;
+
+ if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+ rdif_context->app_tag_value =
+ cpu_to_le16(dif_task_params->application_tag);
+ rdif_context->partial_crc_value = cpu_to_le16(0xffff);
+ val = cpu_to_le32(dif_task_params->initial_ref_tag);
+ rdif_context->initial_ref_tag = val;
+ rdif_context->app_tag_mask =
+ cpu_to_le16(dif_task_params->application_tag_mask);
+ SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
+ dif_task_params->crc_seed ? 1 : 0);
+ SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ dif_task_params->host_guard_type);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ dif_task_params->protection_type);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ dif_task_params->keep_ref_tag_const ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ (dif_task_params->validate_app_tag &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEGUARD,
+ (dif_task_params->validate_guard &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ (dif_task_params->validate_ref_tag &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_HOSTINTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ dif_task_params->dif_on_network ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDGUARD,
+ dif_task_params->forward_guard ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ dif_task_params->forward_app_tag ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDREFTAG,
+ dif_task_params->forward_ref_tag ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_INTERVALSIZE,
+ dif_task_params->dif_block_size_log - 9);
+ SET_FIELD(rdif_context->state,
+ RDIF_TASK_CONTEXT_REFTAGMASK,
+ dif_task_params->ref_tag_mask);
+ SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ dif_task_params->ignore_app_tag);
+ }
+
+ if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ tdif_context->app_tag_value =
+ cpu_to_le16(dif_task_params->application_tag);
+ tdif_context->partial_crc_valueB =
+ cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+ tdif_context->partial_crc_value_a =
+ cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+ SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
+ dif_task_params->crc_seed ? 1 : 0);
+
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+ dif_task_params->tx_dif_conn_err_en ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+ dif_task_params->forward_guard ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ dif_task_params->forward_app_tag ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+ dif_task_params->forward_ref_tag ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+ dif_task_params->dif_block_size_log - 9);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ dif_task_params->dif_on_network ? 1 : 0);
+ val = cpu_to_le32(dif_task_params->initial_ref_tag);
+ tdif_context->initial_ref_tag = val;
+ tdif_context->app_tag_mask =
+ cpu_to_le16(dif_task_params->application_tag_mask);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ dif_task_params->host_guard_type);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ dif_task_params->protection_type);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+ dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ dif_task_params->keep_ref_tag_const ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+ (dif_task_params->validate_guard &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ (dif_task_params->validate_app_tag &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ (dif_task_params->validate_ref_tag &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_REFTAGMASK,
+ dif_task_params->ref_tag_mask);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ dif_task_params->ignore_app_tag ? 1 : 0);
+ }
+}
+
+static void set_local_completion_context(struct iscsi_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
+ enum iscsi_task_type task_type,
+ struct iscsi_conn_params *conn_params,
+ struct iscsi_common_hdr *pdu_header,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ u32 exp_data_transfer_len = conn_params->max_burst_length;
+ struct iscsi_task_context *cxt;
+ bool slow_io = false;
+ u32 task_size, val;
+ u8 num_sges = 0;
+
+ task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
+ dif_task_params);
+
+ init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
+ task_type);
+
+ cxt = task_params->context;
+
+ val = cpu_to_le32(task_size);
+ cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
+ init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+ cmd_params);
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+ cxt->mstorm_st_context.sense_db.lo = val;
+
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+ cxt->mstorm_st_context.sense_db.hi = val;
+
+ if (task_params->tx_io_size) {
+ init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
+ dif_task_params);
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+
+ slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+
+ num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ ISCSI_WQE_NUM_SGES_SLOWIO;
+
+ if (slow_io) {
+ SET_FIELD(cxt->ystorm_st_context.state.flags,
+ YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
+ }
+ } else if (task_params->rx_io_size) {
+ init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
+ dif_task_params);
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ sgl_task_params);
+ num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge) ?
+ min_t(u16, sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ ISCSI_WQE_NUM_SGES_SLOWIO;
+ cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ if (exp_data_transfer_len > task_size ||
+ task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
+ exp_data_transfer_len = task_size;
+
+ init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
+ &task_params->context->ustorm_ag_context,
+ task_size, exp_data_transfer_len, num_sges,
+ dif_task_params ?
+ dif_task_params->tx_dif_conn_err_en : false);
+
+ set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
+ task_type, task_size,
+ exp_data_transfer_len,
+ GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN));
+
+ if (dif_task_params)
+ init_rtdif_task_context(&task_params->context->rdif_context,
+ &task_params->context->tdif_context,
+ dif_task_params, task_type);
+
+ init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
+ cmd_params, task_type, false);
+
+ return 0;
+}
+
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+ struct iscsi_conn_params *conn_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct iscsi_cmd_hdr *cmd_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
+ return init_rw_iscsi_task(task_params,
+ ISCSI_TASK_TYPE_INITIATOR_WRITE,
+ conn_params,
+ (struct iscsi_common_hdr *)cmd_header,
+ tx_sgl_params, cmd_params,
+ dif_task_params);
+ else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+ return init_rw_iscsi_task(task_params,
+ ISCSI_TASK_TYPE_INITIATOR_READ,
+ conn_params,
+ (struct iscsi_common_hdr *)cmd_header,
+ rx_sgl_params, cmd_params,
+ dif_task_params);
+ else
+ return -1;
+}
+
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_login_req_hdr *login_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)login_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0, 0,
+ 0);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)login_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+ struct iscsi_nop_out_hdr *nop_out_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)nop_out_pdu_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
+ set_local_completion_context(task_params->context);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, 0);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size :
+ 0);
+
+ init_sqe(task_params, tx_sgl_task_params, NULL,
+ (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_logout_req_hdr *logout_hdr,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)logout_hdr,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0,
+ 0, 0);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)logout_hdr, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_tmf_request_hdr *tmf_header)
+{
+ init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ init_sqe(task_params, NULL, NULL,
+ (struct iscsi_common_hdr *)tmf_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_text_request_hdr *text_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)text_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0, 0, 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)text_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+ true);
+ return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644
index 000000000000..b6f24f91849d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -0,0 +1,117 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
+struct iscsi_task_params {
+ struct iscsi_task_context *context;
+ struct iscsi_wqe *sqe;
+ u32 tx_io_size;
+ u32 rx_io_size;
+ u16 conn_icid;
+ u16 itid;
+ u8 cq_rss_number;
+};
+
+struct iscsi_conn_params {
+ u32 first_burst_length;
+ u32 max_send_pdu_length;
+ u32 max_burst_length;
+ bool initial_r2t;
+ bool immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator Read/Write
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param conn_params - Connection Parameters
+ * @param cmd_params - command specific parameters
+ * @param cmd_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params - Pointer to Tx SGL task params
+ * @param rx_sgl_params - Pointer to Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+ struct iscsi_conn_params *conn_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct iscsi_cmd_hdr *cmd_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params,
+ struct scsi_dif_task_params *dif_task_params);
+
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param login_header - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_login_req_hdr *login_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param nop_out_pdu_header - PDU Header Parameters
+ * @param tx_sgl_task_params - Pointer to SGL task params
+ * @param rx_sgl_task_params - Pointer to SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+ struct iscsi_nop_out_hdr *nop_out_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param logout_hdr - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_logout_req_hdr *logout_hdr,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param tmf_header - PDU Header Parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param text_header - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_text_request_hdr *text_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes Cleanup task (SQE)
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
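For illustration only (not part of the patch): a minimal sketch of how a caller might drive one of the helpers declared above, here init_initiator_nop_out_task(). The wrapper name and the way the firmware task context, SQE slot and NOP-Out header are obtained are assumptions; only the structures and prototypes come from this series.

static int example_post_nop_out(struct iscsi_task_context *fw_ctx,
				struct iscsi_wqe *sqe,
				struct iscsi_nop_out_hdr *nop_hdr,
				struct scsi_sgl_task_params *tx_sgl,
				u16 icid, u16 itid, u8 cq_idx)
{
	struct iscsi_task_params task = {
		.context	= fw_ctx,	/* firmware task context slot */
		.sqe		= sqe,		/* SQ WQE to be filled in */
		.tx_io_size	= tx_sgl ? tx_sgl->total_buffer_size : 0,
		.rx_io_size	= 0,		/* no ping data expected back */
		.conn_icid	= icid,
		.itid		= itid,
		.cq_rss_number	= cq_idx,
	};

	/* rx SGL may be NULL because rx_io_size is 0 */
	return init_initiator_nop_out_task(&task, nop_hdr, tx_sgl, NULL);
}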
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644
index 000000000000..cdaf918f1019
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -0,0 +1,55 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct scsi_sgl_task_params {
+ struct scsi_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+ u32 initial_ref_tag;
+ bool initial_ref_tag_is_valid;
+ u16 application_tag;
+ u16 application_tag_mask;
+ u16 dif_block_size_log;
+ bool dif_on_network;
+ bool dif_on_host;
+ u8 host_guard_type;
+ u8 protection_type;
+ u8 ref_tag_mask;
+ bool crc_seed;
+ bool tx_dif_conn_err_en;
+ bool ignore_app_tag;
+ bool keep_ref_tag_const;
+ bool validate_guard;
+ bool validate_app_tag;
+ bool validate_ref_tag;
+ bool forward_guard;
+ bool forward_app_tag;
+ bool forward_ref_tag;
+ bool forward_app_tag_with_mask;
+ bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+ struct scsi_sge extended_cdb_sge;
+ struct regpair sense_data_buffer_phys_addr;
+};
+#endif
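Illustrative sketch only (not part of the patch): one way the generic SGL descriptor above might be filled from a DMA-coherent SGE table such as the per-command table the driver allocates in qedi_alloc_sget(). The helper name is hypothetical; the address split mirrors the pattern used elsewhere in this patch for the generic PDU BDs.

static void example_fill_sgl_params(struct scsi_sgl_task_params *p,
				    struct scsi_sge *sge_tbl,
				    dma_addr_t sge_tbl_dma,
				    u16 num_sges, u32 total_len)
{
	p->sgl = sge_tbl;
	/* split the DMA address into the regpair low/high halves */
	p->sgl_phys_addr.lo = (u32)sge_tbl_dma;
	p->sgl_phys_addr.hi = (u32)((u64)sge_tbl_dma >> 32);
	p->total_buffer_size = total_len;
	p->num_sges = num_sges;
	p->small_mid_sge = false;	/* no short mid-list SGEs assumed */
}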
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4cc474364c50..d1de172bebac 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
if (cmd->io_tbl.sge_tbl)
dma_free_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
- sizeof(struct iscsi_sge),
+ sizeof(struct scsi_sge),
cmd->io_tbl.sge_tbl,
cmd->io_tbl.sge_tbl_dma);
@@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
struct qedi_cmd *cmd)
{
struct qedi_io_bdt *io = &cmd->io_tbl;
- struct iscsi_sge *sge;
+ struct scsi_sge *sge;
io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
@@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
{
- struct iscsi_sge *bd_tbl;
+ struct scsi_sge *bd_tbl;
- bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
qedi_conn->gen_pdu.req_buf;
- bd_tbl->reserved0 = 0;
- bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
- bd_tbl->reserved0 = 0;
}
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index d3c06bbddb4e..3247287cb0e7 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -102,7 +102,7 @@ struct qedi_endpoint {
#define QEDI_SQ_WQES_MIN 16
struct qedi_io_bdt {
- struct iscsi_sge *sge_tbl;
+ struct scsi_sge *sge_tbl;
dma_addr_t sge_tbl_dma;
u16 sge_valid;
};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 9543a1b139d4..d61e3ac22e67 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_MODULE_VERSION "8.10.4.0"
#define QEDI_DRIVER_MAJOR_VER 8
#define QEDI_DRIVER_MINOR_VER 10
-#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_REV_VER 4
#define QEDI_DRIVER_ENG_VER 0
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 55e517130311..abcda9b458ab 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -25,6 +25,9 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7260 0xae025190
+#define PHY_ID_BCM7268 0xae025090
+#define PHY_ID_BCM7271 0xae0253b0
#define PHY_ID_BCM7278 0xae0251a0
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c62b709b1ce0..2d9f80848d4b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -447,21 +447,6 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
- * ether_addr_greater - Compare two Ethernet addresses
- * @addr1: Pointer to a six-byte array containing the Ethernet address
- * @addr2: Pointer other six-byte array containing the Ethernet address
- *
- * Compare two Ethernet addresses, returns true addr1 is greater than addr2
- */
-static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
-{
- u64 u1 = ether_addr_to_u64(addr1);
- u64 u2 = ether_addr_to_u64(addr2);
-
- return u1 > u2;
-}
-
-/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9ded8c6d8176..83cc9863444b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -60,6 +60,7 @@ enum ethtool_phys_id_state {
enum {
ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+ ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */
/*
* Add your fresh new hash function bits above and remember to update
@@ -73,6 +74,7 @@ enum {
#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32)
#define ETH_RSS_HASH_UNKNOWN 0
#define ETH_RSS_HASH_NO_CHANGE 0
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 62bbf3c1aa4a..36162485d663 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1504,14 +1504,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
return;
}
-static inline void
-init_cached_read_index(struct vmbus_channel *channel)
-{
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- rbi->cached_read_index = rbi->ring_buffer->read_index;
-}
-
/*
* Mask off host interrupt callback notifications
*/
@@ -1545,76 +1537,48 @@ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
-#define VMBUS_PKT_TRAILER 8
-static inline struct vmpacket_descriptor *
-get_next_pkt_raw(struct vmbus_channel *channel)
+/* Get data payload associated with descriptor */
+static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 priv_read_loc = ring_info->priv_read_index;
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- u32 dsize = ring_info->ring_datasize;
- /*
- * delta is the difference between what is available to read and
- * what was already consumed in place. We commit read index after
- * the whole batch is processed.
- */
- u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
- priv_read_loc - ring_info->ring_buffer->read_index :
- (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
- u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
-
- if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
- return NULL;
-
- return ring_buffer + priv_read_loc;
+ return (void *)((unsigned long)desc + (desc->offset8 << 3));
}
-/*
- * A helper function to step through packets "in-place"
- * This API is to be called after each successful call
- * get_next_pkt_raw().
- */
-static inline void put_pkt_raw(struct vmbus_channel *channel,
- struct vmpacket_descriptor *desc)
+/* Get data size associated with descriptor */
+static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 packetlen = desc->len8 << 3;
- u32 dsize = ring_info->ring_datasize;
-
- /*
- * Include the packet trailer.
- */
- ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
- ring_info->priv_read_index %= dsize;
+ return (desc->len8 << 3) - (desc->offset8 << 3);
}
+
+struct vmpacket_descriptor *
+hv_pkt_iter_first(struct vmbus_channel *channel);
+
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt);
+
+void hv_pkt_iter_close(struct vmbus_channel *channel);
+
/*
- * This call commits the read index and potentially signals the host.
- * Here is the pattern for using the "in-place" consumption APIs:
- *
- * init_cached_read_index();
- *
- * while (get_next_pkt_raw() {
- * process the packet "in-place";
- * put_pkt_raw();
- * }
- * if (packets processed in place)
- * commit_rd_index();
+ * Get next packet descriptor from iterator
+ * If at end of list, return NULL and update host.
*/
-static inline void commit_rd_index(struct vmbus_channel *channel)
+static inline struct vmpacket_descriptor *
+hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_rmb();
- ring_info->ring_buffer->read_index = ring_info->priv_read_index;
+ struct vmpacket_descriptor *nxt;
+
+ nxt = __hv_pkt_iter_next(channel, pkt);
+ if (!nxt)
+ hv_pkt_iter_close(channel);
- hv_signal_on_read(channel);
+ return nxt;
}
+#define foreach_vmbus_pkt(pkt, channel) \
+ for (pkt = hv_pkt_iter_first(channel); pkt; \
+ pkt = hv_pkt_iter_next(channel, pkt))
#endif /* _HYPERV_H */
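For illustration only (not part of the patch): a minimal sketch of how a VMBus driver could consume packets in place with the iterator API that replaces get_next_pkt_raw()/put_pkt_raw()/commit_rd_index(). The function name is hypothetical; everything it calls is introduced above.

static u32 example_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *pkt;
	u32 total = 0;

	foreach_vmbus_pkt(pkt, channel) {
		/* payload can be inspected in place, no copy needed */
		void *data = hv_pkt_data(pkt);
		u32 len = hv_pkt_datalen(pkt);

		total += len;
		(void)data;
	}

	/*
	 * hv_pkt_iter_next() invokes hv_pkt_iter_close() once the ring is
	 * empty; that commits the read index and signals the host, so no
	 * explicit commit step is needed here.
	 */
	return total;
}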
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 71be5b330d21..f0d79bd054ca 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -70,6 +70,7 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
+ __s32 disable_policy;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 52966b9bfde3..fbab6e0514f0 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -100,8 +100,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 10
-#define FW_REVISION_VERSION 10
+#define FW_MINOR_VERSION 15
+#define FW_REVISION_VERSION 3
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -187,6 +187,9 @@
/* DEMS */
#define DQ_DEMS_LEGACY 0
+#define DQ_DEMS_TOE_MORE_TO_SEND 3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
+#define DQ_DEMS_ROCE_CQ_CONS 7
/* XCM agg val selection */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
@@ -214,6 +217,9 @@
#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -269,6 +275,8 @@
#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -285,6 +293,9 @@
#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
/* TCM agg counter flag selection (HW) */
#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
@@ -301,6 +312,9 @@
#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
@@ -689,6 +703,16 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
+struct rdma_eqe_destroy_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+union rdma_eqe_data {
+ struct regpair async_handle;
+ struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@@ -705,9 +729,9 @@ union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
+ union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
- struct regpair roce_handle;
};
/* Event Ring Entry */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 4b402fb0eaad..34d93eb5bfba 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -49,6 +49,9 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 2e417a45c5f7..947a635d04bb 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data {
struct regpair terminate_params_addr;
};
-struct fcoe_fast_sgl_ctx {
- struct regpair sgl_start_addr;
- __le32 sgl_byte_offset;
- __le16 task_reuse_cnt;
- __le16 init_offset_in_first_sge;
-};
-
struct fcoe_slow_sgl_ctx {
struct regpair base_sgl_addr;
__le16 curr_sge_off;
@@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx {
__le16 reserved;
};
-struct fcoe_sge {
- struct regpair sge_addr;
- __le16 size;
- __le16 reserved0;
- u8 reserved1[3];
- u8 is_valid_sge;
-};
-
-union fcoe_data_desc_ctx {
- struct fcoe_fast_sgl_ctx fast;
- struct fcoe_slow_sgl_ctx slow;
- struct fcoe_sge single_sge;
-};
-
union fcoe_dix_desc_ctx {
struct fcoe_slow_sgl_ctx dix_sgl;
- struct fcoe_sge cached_dix_sge;
+ struct scsi_sge cached_dix_sge;
+};
+
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
};
struct fcoe_fcp_cmd_payload {
@@ -172,57 +158,6 @@ enum fcoe_mode_type {
MAX_FCOE_MODE_TYPE
};
-struct fcoe_mstorm_fcoe_task_st_ctx_fp {
- __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
- __le16 difDataResidue;
- __le16 parent_id;
- __le16 single_sge_saved_offset;
- __le32 data_2_trns_rem;
- __le32 offset_in_io;
- union fcoe_dix_desc_ctx dix_desc;
- union fcoe_data_desc_ctx data_desc;
-};
-
-struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
- __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
- u8 tx_rx_sgl_mode;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
- u8 rsrv2;
- __le32 num_prm_zero_read;
- struct regpair rsp_buf_addr;
-};
-
struct fcoe_rx_stat {
struct regpair fcoe_rx_byte_cnt;
struct regpair fcoe_rx_data_pkt_cnt;
@@ -236,16 +171,6 @@ struct fcoe_rx_stat {
__le32 rsrv;
};
-enum fcoe_sgl_mode {
- FCOE_SLOW_SGL,
- FCOE_SINGLE_FAST_SGE,
- FCOE_2_FAST_SGE,
- FCOE_3_FAST_SGE,
- FCOE_4_FAST_SGE,
- FCOE_MUL_FAST_SGES,
- MAX_FCOE_SGL_MODE
-};
-
struct fcoe_stat_ramrod_data {
struct regpair stat_params_addr;
};
@@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx {
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
union protection_info_union_ctx protection_info_union;
__le32 data_2_trns_rem;
+ struct scsi_sgl_params sgl_params;
+ u8 reserved1[12];
union fcoe_tx_info_union_ctx tx_info_union;
union fcoe_dix_desc_ctx dix_desc;
- union fcoe_data_desc_ctx data_desc;
+ struct scsi_cached_sges data_desc;
__le16 ox_id;
__le16 rx_id;
__le32 task_rety_identifier;
- __le32 reserved1[2];
+ u8 reserved2[8];
};
struct ystorm_fcoe_task_ag_ctx {
@@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx {
};
struct mstorm_fcoe_task_st_ctx {
- struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
- struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+ struct regpair rsp_buf_addr;
+ __le32 rsrv[2];
+ struct scsi_sgl_params sgl_params;
+ __le32 data_2_trns_rem;
+ __le32 data_buffer_offset;
+ __le16 parent_id;
+ __le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
+ struct scsi_cached_sges data_desc;
};
struct ustorm_fcoe_task_ag_ctx {
@@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx {
struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@@ -668,20 +622,20 @@ struct fcoe_tx_stat {
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK 0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT 0
-#define FCOE_WQE_SGL_MODE_MASK 0x7
-#define FCOE_WQE_SGL_MODE_SHIFT 4
-#define FCOE_WQE_CONTINUATION_MASK 0x1
-#define FCOE_WQE_CONTINUATION_SHIFT 7
-#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
-#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
-#define FCOE_WQE_SUPER_IO_MASK 0x1
-#define FCOE_WQE_SUPER_IO_SHIFT 9
-#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
-#define FCOE_WQE_RESERVED0_MASK 0x1F
-#define FCOE_WQE_RESERVED0_SHIFT 11
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x1
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK 0x1
+#define FCOE_WQE_RESERVED_SHIFT 7
+#define FCOE_WQE_NUM_SGES_MASK 0xF
+#define FCOE_WQE_NUM_SGES_SHIFT 8
+#define FCOE_WQE_RESERVED1_MASK 0xF
+#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4c5747babcf6..69949f8e354b 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -39,17 +39,9 @@
/* iSCSI HSI constants */
#define ISCSI_DEFAULT_MTU (1500)
-/* Current iSCSI HSI version number composed of two fields (16 bit) */
-#define ISCSI_HSI_MAJOR_VERSION (0)
-#define ISCSI_HSI_MINOR_VERSION (0)
-
/* KWQ (kernel work queue) layer codes */
#define ISCSI_SLOW_PATH_LAYER_CODE (6)
-/* CQE completion status */
-#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
-#define ISCSI_EQE_RST_CONN_RCVD (0x1)
-
/* iSCSI parameter defaults */
#define ISCSI_DEFAULT_HEADER_DIGEST (0)
#define ISCSI_DEFAULT_DATA_DIGEST (0)
@@ -68,6 +60,10 @@
#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+#define ISCSI_AHS_CNTL_SIZE 4
+
+#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
+
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
@@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr {
__le32 reserved7;
};
-struct iscsi_sge {
- struct regpair sge_addr;
- __le16 sge_len;
- __le16 reserved0;
- __le32 reserved1;
-};
-
-struct iscsi_cached_sge_ctx {
- struct iscsi_sge sge;
- struct regpair reserved;
- __le32 dsgl_curr_offset[2];
-};
-
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
@@ -229,8 +212,13 @@ struct iscsi_common_hdr {
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 lun_reserved[4];
- __le32 data[6];
+ struct regpair lun_reserved;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmdstat_sn;
+ __le32 exp_statcmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data[3];
};
struct iscsi_conn_offload_params {
@@ -246,8 +234,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
@@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
u8 reserved0[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
@@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
__le32 expected_transfer_length;
__le32 cmd_sn;
__le32 exp_stat_sn;
- struct iscsi_sge cdb_sge;
+ struct scsi_sge cdb_sge;
};
struct iscsi_login_req_hdr {
@@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr {
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4;
- __le16 time2retain;
- __le16 time2wait;
+ __le16 time_2_retain;
+ __le16 time_2_wait;
__le32 reserved5[1];
};
@@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr {
#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
- __le32 rtt;
+ __le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
@@ -641,7 +635,7 @@ struct iscsi_reject_hdr {
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
- __le32 reserved1;
+ __le32 all_ones;
__le32 reserved2;
__le32 stat_sn;
__le32 exp_cmd_sn;
@@ -688,7 +682,9 @@ struct iscsi_cqe_solicited {
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
- __le32 reserved1[2];
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1[1];
union iscsi_task_hdr iscsi_hdr;
};
@@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
-struct iscsi_virt_sgl_ctx {
- struct regpair sgl_base;
- struct regpair dsgl_base;
- __le32 sgl_initial_offset;
- __le32 dsgl_initial_offset;
- __le32 dsgl_curr_offset[2];
-};
-
-struct iscsi_sgl_var_params {
- u8 sgl_ptr;
- u8 dsgl_ptr;
- __le16 sge_offset;
- __le16 dsge_offset;
-};
-
-struct iscsi_phys_sgl_ctx {
- struct regpair sgl_base;
- struct regpair dsgl_base;
- u8 sgl_size;
- u8 dsgl_size;
- __le16 reserved;
- struct iscsi_sgl_var_params var_params[2];
-};
-
-union iscsi_data_desc_ctx {
- struct iscsi_virt_sgl_ctx virt_sgl;
- struct iscsi_phys_sgl_ctx phys_sgl;
- struct iscsi_cached_sge_ctx cached_sge;
-};
struct iscsi_debug_modes {
u8 flags;
@@ -771,8 +738,10 @@ struct iscsi_debug_modes {
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
-#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
-#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
};
struct iscsi_dif_flags {
@@ -806,7 +775,6 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
- ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
MAX_ISCSI_EQE_OPCODE
};
@@ -856,31 +824,11 @@ enum iscsi_error_types {
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+ ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
-struct iscsi_mflags {
- u8 mflags;
-#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
-#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
-#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
-#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
-#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
-#define ISCSI_MFLAGS_RESERVED_SHIFT 2
-};
-
-struct iscsi_sgl {
- struct regpair sgl_addr;
- __le16 updated_sge_size;
- __le16 updated_sge_offset;
- __le32 byte_offset;
-};
-
-union iscsi_mstorm_sgl {
- struct iscsi_sgl sgl_struct;
- struct iscsi_sge single_sge;
-};
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id {
struct iscsi_reg1 {
__le32 reg1_map;
-#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 3
+#define ISCSI_REG1_NUM_SGES_MASK 0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
};
union iscsi_seq_num {
@@ -967,22 +915,33 @@ struct iscsi_spe_func_init {
};
struct ystorm_iscsi_task_state {
- union iscsi_data_desc_ctx sgl_ctx_union;
- __le32 buffer_offset[2];
- __le16 bytes_nxt_dif;
- __le16 rxmit_bytes_nxt_dif;
- union iscsi_seq_num seq_num_union;
- u8 dif_bytes_leftover;
- u8 rxmit_dif_bytes_leftover;
- __le16 reuse_count;
- struct iscsi_dif_flags dif_flags;
- u8 local_comp;
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
- __le32 sgl_offset[2];
+ __le32 buffer_offset;
+ union iscsi_seq_num seq_num;
+ struct iscsi_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
+};
+
+struct ystorm_iscsi_task_rxmit_opt {
+ __le32 fast_rxmit_sge_offset;
+ __le32 scan_start_buffer_offset;
+ __le32 fast_rxmit_buffer_offset;
+ u8 scan_start_sgl_index;
+ u8 fast_rxmit_sgl_index;
+ __le16 reserved;
};
struct ystorm_iscsi_task_st_ctx {
struct ystorm_iscsi_task_state state;
+ struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
union iscsi_task_hdr pdu_hdr;
};
@@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
};
struct mstorm_iscsi_task_st_ctx {
- union iscsi_mstorm_sgl sgl_union;
- struct iscsi_dif_flags dif_flags;
- struct iscsi_mflags flags;
- u8 sgl_size;
- u8 host_sge_index;
- __le16 dix_cur_sge_offset;
- __le16 dix_cur_sge_size;
- __le32 data_offset_rtid;
- u8 dif_offset;
- u8 dix_sgl_size;
- u8 dix_sge_index;
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
u8 task_type;
+ struct iscsi_dif_flags dif_flags;
+ u8 reserved0[2];
struct regpair sense_db;
- struct regpair dix_sgl_cur_sge;
- __le32 rem_task_size;
- __le16 reuse_count;
- __le16 dif_data_residue;
- u8 reserved0[4];
- __le32 reserved1[1];
+ __le32 expected_itt;
+ __le32 reserved1;
};
struct ustorm_iscsi_task_st_ctx {
@@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
- u8 reserved2;
+ struct iscsi_dif_flags dif_flags;
__le16 reserved3;
__le32 reserved4;
__le32 reserved5;
@@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
@@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
@@ -1272,32 +1221,22 @@ struct iscsi_uhqe {
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
-struct iscsi_wqe_field {
- __le32 contlen_cdbsize_field;
-#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
-#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
-#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
-};
-
-union iscsi_wqe_field_union {
- struct iscsi_wqe_field cont_field;
- __le32 prev_tid;
-};
struct iscsi_wqe {
__le16 task_id;
u8 flags;
#define ISCSI_WQE_WQE_TYPE_MASK 0x7
#define ISCSI_WQE_WQE_TYPE_SHIFT 0
-#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
-#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
-#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_NUM_SGES_MASK 0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
#define ISCSI_WQE_RESPONSE_MASK 0x1
#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
- union iscsi_wqe_field_union cont_prevtid_union;
+ __le32 contlen_cdbsize;
+#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
enum iscsi_wqe_type {
@@ -1318,17 +1257,15 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
-#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_XHQE_FINAL_MASK 0x1
-#define ISCSI_XHQE_FINAL_SHIFT 3
-#define ISCSI_XHQE_SUPER_IO_MASK 0x1
-#define ISCSI_XHQE_SUPER_IO_SHIFT 4
-#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
-#define ISCSI_XHQE_RESERVED_MASK 0x3
-#define ISCSI_XHQE_RESERVED_SHIFT 6
- union iscsi_seq_num seq_num_union;
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 0
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK 0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT 2
+#define ISCSI_XHQE_RESERVED0_MASK 0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT 6
+ union iscsi_seq_num seq_num;
__le16 reserved1;
};
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index fde56c436f71..8e0065c52857 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -300,6 +300,11 @@ struct qed_sb_info {
struct qed_dev *cdev;
};
+enum qed_dev_type {
+ QED_DEV_TYPE_BB,
+ QED_DEV_TYPE_AH,
+};
+
struct qed_dev_info {
unsigned long pci_mem_start;
unsigned long pci_mem_end;
@@ -325,6 +330,8 @@ struct qed_dev_info {
u16 mtu;
bool wol_support;
+
+ enum qed_dev_type dev_type;
};
enum qed_sb_type {
@@ -752,7 +759,7 @@ enum qed_mf_mode {
QED_MF_NPAR,
};
-struct qed_eth_stats {
+struct qed_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -784,11 +791,6 @@ struct qed_eth_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -805,14 +807,8 @@ struct qed_eth_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
- u64 tx_1519_to_2047_byte_packets;
- u64 tx_2048_to_4095_byte_packets;
- u64 tx_4096_to_9216_byte_packets;
- u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
- u64 tx_lpi_entry_count;
- u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
@@ -827,6 +823,34 @@ struct qed_eth_stats {
u64 tx_mac_ctrl_frames;
};
+struct qed_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct qed_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qed_eth_stats {
+ struct qed_eth_stats_common common;
+
+ union {
+ struct qed_eth_stats_bb bb;
+ struct qed_eth_stats_ah ah;
+ };
+};
+
#define QED_SB_IDX 0x0002
#define RX_PI 0
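Illustrative sketch only (not part of the patch): with the statistics split into a common block plus a BB/AH union, a consumer would key off qed_dev_info::dev_type before touching the chip-specific counters. The helper name is hypothetical.

static u64 example_oversize_rx_packets(const struct qed_dev_info *dev_info,
				       const struct qed_eth_stats *stats)
{
	/* AH parts expose a single >1518-byte bucket ... */
	if (dev_info->dev_type == QED_DEV_TYPE_AH)
		return stats->ah.rx_1519_to_max_byte_packets;

	/* ... while BB parts keep the finer-grained histogram */
	return stats->bb.rx_1519_to_2047_byte_packets +
	       stats->bb.rx_2048_to_4095_byte_packets +
	       stats->bb.rx_4096_to_9216_byte_packets +
	       stats->bb.rx_9217_to_16383_byte_packets;
}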
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index f773aa5e746f..72c770f9f666 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -52,7 +52,8 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index bad02df213df..866f063026de 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -38,4 +38,21 @@
#define ROCE_MAX_QPS (32 * 1024)
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 03f3e37ab059..08df82a096b6 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -40,6 +40,8 @@
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
+#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+
#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
struct scsi_bd {
@@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
__le16 reserved0[3];
};
+struct scsi_sge {
+ struct regpair sge_addr;
+ __le32 sge_len;
+ __le32 reserved;
+};
+
+struct scsi_cached_sges {
+ struct scsi_sge sge[4];
+};
+
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
@@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
-struct scsi_sge {
- struct regpair sge_addr;
- __le16 sge_len;
- __le16 reserved0;
- __le32 reserved1;
+enum scsi_sgl_mode {
+ SCSI_TX_SLOW_SGL,
+ SCSI_FAST_SGL,
+ MAX_SCSI_SGL_MODE
+};
+
+struct scsi_sgl_params {
+ struct regpair sgl_addr;
+ __le32 sgl_total_length;
+ __le32 sge_offset;
+ __le16 sgl_num_sges;
+ u8 sgl_index;
+ u8 reserved;
};
struct scsi_terminate_extra_params {
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 46fe7856f1b2..a5e843268f0e 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
TCP_EVENT_ADD_ISLE_RIGHT,
TCP_EVENT_ADD_ISLE_LEFT,
TCP_EVENT_JOIN,
+ TCP_EVENT_DELETE_ISLES,
TCP_EVENT_NOP,
MAX_TCP_SEG_PLACEMENT_EVENT
};
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index fc273e9d5f67..be47b859e954 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,9 @@
#include <linux/platform_device.h>
+#define MTL_MAX_RX_QUEUES 8
+#define MTL_MAX_TX_QUEUES 8
+
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
#define STMMAC_RX_COE_TYPE2 2
@@ -44,6 +47,18 @@
#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+/* MTL algorithms identifiers */
+#define MTL_TX_ALGORITHM_WRR 0x0
+#define MTL_TX_ALGORITHM_WFQ 0x1
+#define MTL_TX_ALGORITHM_DWRR 0x2
+#define MTL_TX_ALGORITHM_SP 0x3
+#define MTL_RX_ALGORITHM_SP 0x4
+#define MTL_RX_ALGORITHM_WSP 0x5
+
+/* RX/TX Queue Mode */
+#define MTL_QUEUE_DCB 0x0
+#define MTL_QUEUE_AVB 0x1
+
/* The MDC clock could be set higher than the IEEE 802.3
* specified frequency limit of 2.5 MHz, by programming a clock divider
* of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,21 @@ struct stmmac_axi {
bool axi_rb;
};
+struct stmmac_rxq_cfg {
+ u8 mode_to_use;
+ u8 chan;
+};
+
+struct stmmac_txq_cfg {
+ u8 weight;
+ u8 mode_to_use;
+ /* Credit Base Shaper parameters */
+ u32 send_slope;
+ u32 idle_slope;
+ u32 high_credit;
+ u32 low_credit;
+};
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
@@ -133,6 +163,12 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
+ u8 rx_queues_to_use;
+ u8 tx_queues_to_use;
+ u8 rx_sched_algorithm;
+ u8 tx_sched_algorithm;
+ struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+ struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
void (*fix_mac_speed)(void *priv, unsigned int speed);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
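Illustrative sketch only (not part of the patch): a platform glue driver could describe a two-queue setup through the new multi-queue fields above; all values are made up for the example and the helper name is hypothetical.

static void example_fill_mtl_cfg(struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 2;
	plat->tx_queues_to_use = 2;
	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;

	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;
	plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	plat->rx_queues_cfg[1].chan = 1;

	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].weight = 10;

	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	/* credit based shaper parameters for the AVB queue (example values) */
	plat->tx_queues_cfg[1].send_slope = 0x1000;
	plat->tx_queues_cfg[1].idle_slope = 0x1000;
	plat->tx_queues_cfg[1].high_credit = 0x800000;
	plat->tx_queues_cfg[1].low_credit = 0xff800000;
}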
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 4e13e695f025..bf0e42c2a6f7 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -248,6 +248,11 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
return !!((ds->dsa_port_mask) & (1 << p));
}
+static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p)
+{
+ return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p);
+}
+
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 368bb4024b78..d9cee9659978 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -232,9 +232,21 @@ enum fib_event_type {
int register_fib_notifier(struct notifier_block *nb,
void (*cb)(struct notifier_block *nb));
int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifier(struct notifier_block *nb, struct net *net,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);
+void fib_notify(struct net *net, struct notifier_block *nb);
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+void fib_rules_notify(struct net *net, struct notifier_block *nb);
+#else
+static inline void fib_rules_notify(struct net *net, struct notifier_block *nb)
+{
+}
+#endif
+
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 179253f9dcfd..a18af6a16eb5 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -19,6 +19,8 @@
struct mpls_iptunnel_encap {
u32 label[MAX_NEW_LABELS];
u8 labels;
+ u8 ttl_propagate;
+ u8 default_ttl;
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index d29203651c01..6608b3693385 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -9,8 +9,11 @@ struct mpls_route;
struct ctl_table_header;
struct netns_mpls {
+ int ip_ttl_propagate;
+ int default_ttl;
size_t platform_labels;
struct mpls_route __rcu * __rcu *platform_label;
+
struct ctl_table_header *ctl;
};
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f1b76b8e6d2d..bec46f63f10c 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -92,7 +92,7 @@ int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
-void qdisc_hash_add(struct Qdisc *q);
+void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index aeec4086afb2..65d502610314 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -66,6 +66,7 @@ struct Qdisc {
#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
* qdisc_tree_decrease_qlen() should stop.
*/
+#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index b6f682ec184a..47113f2c4b0a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -293,6 +293,22 @@ struct sctp_chunk *sctp_process_strreset_inreq(
struct sctp_association *asoc,
union sctp_params param,
struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_resp(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
/* Prototypes for statetable processing. */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 324b5965fc4d..1060494ac230 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -132,6 +132,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags,
__u16 stream_num, __u16 *stream_list, gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 0caee631a836..fe236b3429f0 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -6,10 +6,10 @@
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport, u32 *tsoff);
+u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport, u32 *tsoff);
-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport, u32 *tsoff);
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
diff --git a/include/net/sock.h b/include/net/sock.h
index 03252d53975d..08142be8938e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1783,11 +1783,8 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk_tx_queue_clear(sk);
sk->sk_dst_pending_confirm = 0;
- /*
- * This can be called while sk is owned by the caller only,
- * with no state that can be checked in a rcu_dereference_check() cond
- */
- old_dst = rcu_dereference_raw(sk->sk_dst_cache);
+ old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_dst_cache, dst);
dst_release(old_dst);
}
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 48cca321ee6c..9690c047b6cf 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -49,4 +49,9 @@ static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
return to_vlan(a)->tcfv_push_proto;
}
+static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_prio;
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6ec4ea652f3f..bede8f7fa742 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1816,7 +1816,7 @@ struct tcp_request_sock_ops {
struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
const struct request_sock *req,
bool *strict);
- __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
+ __u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 8ef9e75e004e..d8f6a1ac9af4 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -183,6 +183,7 @@ enum {
DEVCONF_SEG6_REQUIRE_HMAC,
DEVCONF_ENHANCED_DAD,
DEVCONF_ADDR_GEN_MODE,
+ DEVCONF_DISABLE_POLICY,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
index d80a0498f77e..f5e45095b0bb 100644
--- a/include/uapi/linux/mpls_iptunnel.h
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -16,11 +16,13 @@
/* MPLS tunnel attributes
* [RTA_ENCAP] = {
* [MPLS_IPTUNNEL_DST]
+ * [MPLS_IPTUNNEL_TTL]
* }
*/
enum {
MPLS_IPTUNNEL_UNSPEC,
MPLS_IPTUNNEL_DST,
+ MPLS_IPTUNNEL_TTL,
__MPLS_IPTUNNEL_MAX,
};
#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 6546917d605a..3dd72aee4d32 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -319,6 +319,7 @@ enum rtattr_type_t {
RTA_EXPIRES,
RTA_PAD,
RTA_UID,
+ RTA_TTL_PROPAGATE,
__RTA_MAX
};
@@ -545,6 +546,7 @@ enum {
TCA_STATS2,
TCA_STAB,
TCA_PAD,
+ TCA_DUMP_INVISIBLE,
__TCA_MAX
};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d3ae381fcf33..7212870ef5d7 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -115,6 +115,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_PR_SUPPORTED 113
#define SCTP_DEFAULT_PRINFO 114
#define SCTP_PR_ASSOC_STATUS 115
+#define SCTP_RECONFIG_SUPPORTED 117
#define SCTP_ENABLE_STREAM_RESET 118
#define SCTP_RESET_STREAMS 119
#define SCTP_RESET_ASSOC 120
@@ -502,6 +503,28 @@ struct sctp_stream_reset_event {
__u16 strreset_stream_list[];
};
+#define SCTP_ASSOC_RESET_DENIED 0x0004
+#define SCTP_ASSOC_RESET_FAILED 0x0008
+struct sctp_assoc_reset_event {
+ __u16 assocreset_type;
+ __u16 assocreset_flags;
+ __u32 assocreset_length;
+ sctp_assoc_t assocreset_assoc_id;
+ __u32 assocreset_local_tsn;
+ __u32 assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_CHANGE_DENIED 0x0004
+#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+struct sctp_stream_change_event {
+ __u16 strchange_type;
+ __u16 strchange_flags;
+ __u32 strchange_length;
+ sctp_assoc_t strchange_assoc_id;
+ __u16 strchange_instrms;
+ __u16 strchange_outstrms;
+};
+
/*
* Described in Section 7.3
* Ancillary Data and Notification Interest Options
@@ -518,6 +541,8 @@ struct sctp_event_subscribe {
__u8 sctp_authentication_event;
__u8 sctp_sender_dry_event;
__u8 sctp_stream_reset_event;
+ __u8 sctp_assoc_reset_event;
+ __u8 sctp_stream_change_event;
};
/*
@@ -543,6 +568,8 @@ union sctp_notification {
struct sctp_authkey_event sn_authkey_event;
struct sctp_sender_dry_event sn_sender_dry_event;
struct sctp_stream_reset_event sn_strreset_event;
+ struct sctp_assoc_reset_event sn_assocreset_event;
+ struct sctp_stream_change_event sn_strchange_event;
};
/* Section 5.3.1
@@ -572,6 +599,10 @@ enum sctp_sn_type {
#define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT
SCTP_STREAM_RESET_EVENT,
#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT
+ SCTP_ASSOC_RESET_EVENT,
+#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
+ SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
};
/* Notification error codes used to fill up the error fields in some
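Editorially, the two new notification types plug into the existing SCTP_EVENTS subscription mechanism through the sctp_event_subscribe fields added above. A minimal, hedged userspace sketch follows (it assumes headers that already carry the new fields; "fd" stands in for an existing SCTP socket descriptor):

/* Hedged sketch: opt in to the new association-reset and stream-change
 * notifications on an already created SCTP socket.  "fd" is a placeholder.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int subscribe_reconf_events(int fd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_assoc_reset_event = 1;	/* SCTP_ASSOC_RESET_EVENT */
	events.sctp_stream_change_event = 1;	/* SCTP_STREAM_CHANGE_EVENT */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
			  &events, sizeof(events));
}

Once subscribed, the corresponding sn_assocreset_event and sn_strchange_event members of union sctp_notification carry the structures defined above.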
diff --git a/net/atm/common.c b/net/atm/common.c
index 9613381f5db0..f06422f4108d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -62,21 +62,16 @@ static void vcc_remove_socket(struct sock *sk)
write_unlock_irq(&vcc_sklist_lock);
}
-static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
+static bool vcc_tx_ready(struct atm_vcc *vcc, unsigned int size)
{
- struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
- return NULL;
+ return false;
}
- while (!(skb = alloc_skb(size, GFP_KERNEL)))
- schedule();
- pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
- atomic_add(skb->truesize, &sk->sk_wmem_alloc);
- return skb;
+ return true;
}
static void vcc_sock_destruct(struct sock *sk)
@@ -606,7 +601,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
eff = (size+3) & ~3; /* align to word boundary */
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
- while (!(skb = alloc_tx(vcc, eff))) {
+ while (!vcc_tx_ready(vcc, eff)) {
if (m->msg_flags & MSG_DONTWAIT) {
error = -EAGAIN;
break;
@@ -628,6 +623,15 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
+
+ skb = alloc_skb(eff, GFP_KERNEL);
+ if (!skb) {
+ error = -ENOMEM;
+ goto out;
+ }
+ pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
+ atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index fb55327dcfea..70ccda233bd1 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -412,9 +412,8 @@ static int __init init_net_drop_monitor(void)
for_each_possible_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
INIT_WORK(&data->dm_alert_work, send_dm_alert);
- init_timer(&data->send_timer);
- data->send_timer.data = (unsigned long)data;
- data->send_timer.function = sched_send_work;
+ setup_timer(&data->send_timer, sched_send_work,
+ (unsigned long)data);
spin_lock_init(&data->lock);
reset_per_cpu_data(data);
}
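For context, the conversion above is mechanical: in kernels of this era setup_timer() is shorthand for the three removed lines. A rough equivalence sketch, not the actual macro definition:

/* Rough equivalence sketch (pre-timer_setup() era API): setup_timer()
 * collapses the open-coded init/function/data sequence into one call.
 */
#include <linux/timer.h>

static inline void setup_timer_equiv(struct timer_list *t,
				     void (*fn)(unsigned long),
				     unsigned long data)
{
	init_timer(t);
	t->function = fn;
	t->data = data;
}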
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index aecb2c7241b6..905a88ad28e0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -109,6 +109,7 @@ static const char
rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
[ETH_RSS_HASH_TOP_BIT] = "toeplitz",
[ETH_RSS_HASH_XOR_BIT] = "xor",
+ [ETH_RSS_HASH_CRC32_BIT] = "crc32",
};
static const char
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index c35aae13c8d2..5f3ae922fcd1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -113,6 +113,216 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
}
EXPORT_SYMBOL(__skb_flow_get_ports);
+enum flow_dissect_ret {
+ FLOW_DISSECT_RET_OUT_GOOD,
+ FLOW_DISSECT_RET_OUT_BAD,
+ FLOW_DISSECT_RET_OUT_PROTO_AGAIN,
+};
+
+static enum flow_dissect_ret
+__skb_flow_dissect_mpls(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data, int nhoff, int hlen)
+{
+ struct flow_dissector_key_keyid *key_keyid;
+ struct mpls_label *hdr, _hdr[2];
+
+ if (!dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY))
+ return FLOW_DISSECT_RET_OUT_GOOD;
+
+ hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
+ hlen, &_hdr);
+ if (!hdr)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
+ MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
+ key_keyid = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
+ target_container);
+ key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
+ }
+ return FLOW_DISSECT_RET_OUT_GOOD;
+}
+
+static enum flow_dissect_ret
+__skb_flow_dissect_arp(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data, int nhoff, int hlen)
+{
+ struct flow_dissector_key_arp *key_arp;
+ struct {
+ unsigned char ar_sha[ETH_ALEN];
+ unsigned char ar_sip[4];
+ unsigned char ar_tha[ETH_ALEN];
+ unsigned char ar_tip[4];
+ } *arp_eth, _arp_eth;
+ const struct arphdr *arp;
+ struct arphdr *_arp;
+
+ if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
+ return FLOW_DISSECT_RET_OUT_GOOD;
+
+ arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
+ hlen, &_arp);
+ if (!arp)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+ arp->ar_pro != htons(ETH_P_IP) ||
+ arp->ar_hln != ETH_ALEN ||
+ arp->ar_pln != 4 ||
+ (arp->ar_op != htons(ARPOP_REPLY) &&
+ arp->ar_op != htons(ARPOP_REQUEST)))
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
+ sizeof(_arp_eth), data,
+ hlen, &_arp_eth);
+ if (!arp_eth)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ key_arp = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_ARP,
+ target_container);
+
+ memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
+ memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
+
+ /* Only store the lower byte of the opcode;
+ * this covers ARPOP_REPLY and ARPOP_REQUEST.
+ */
+ key_arp->op = ntohs(arp->ar_op) & 0xff;
+
+ ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
+ ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
+
+ return FLOW_DISSECT_RET_OUT_GOOD;
+}
+
+static enum flow_dissect_ret
+__skb_flow_dissect_gre(const struct sk_buff *skb,
+ struct flow_dissector_key_control *key_control,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data,
+ __be16 *p_proto, int *p_nhoff, int *p_hlen,
+ unsigned int flags)
+{
+ struct flow_dissector_key_keyid *key_keyid;
+ struct gre_base_hdr *hdr, _hdr;
+ int offset = 0;
+ u16 gre_ver;
+
+ hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
+ data, *p_hlen, &_hdr);
+ if (!hdr)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ /* Only look inside GRE without routing */
+ if (hdr->flags & GRE_ROUTING)
+ return FLOW_DISSECT_RET_OUT_GOOD;
+
+ /* Only look inside GRE for version 0 and 1 */
+ gre_ver = ntohs(hdr->flags & GRE_VERSION);
+ if (gre_ver > 1)
+ return FLOW_DISSECT_RET_OUT_GOOD;
+
+ *p_proto = hdr->protocol;
+ if (gre_ver) {
+ /* Version 1 must be PPTP, and check the flags */
+ if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
+ return FLOW_DISSECT_RET_OUT_GOOD;
+ }
+
+ offset += sizeof(struct gre_base_hdr);
+
+ if (hdr->flags & GRE_CSUM)
+ offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
+ sizeof(((struct gre_full_hdr *) 0)->reserved1);
+
+ if (hdr->flags & GRE_KEY) {
+ const __be32 *keyid;
+ __be32 _keyid;
+
+ keyid = __skb_header_pointer(skb, *p_nhoff + offset,
+ sizeof(_keyid),
+ data, *p_hlen, &_keyid);
+ if (!keyid)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+ key_keyid = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_GRE_KEYID,
+ target_container);
+ if (gre_ver == 0)
+ key_keyid->keyid = *keyid;
+ else
+ key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
+ }
+ offset += sizeof(((struct gre_full_hdr *) 0)->key);
+ }
+
+ if (hdr->flags & GRE_SEQ)
+ offset += sizeof(((struct pptp_gre_header *) 0)->seq);
+
+ if (gre_ver == 0) {
+ if (*p_proto == htons(ETH_P_TEB)) {
+ const struct ethhdr *eth;
+ struct ethhdr _eth;
+
+ eth = __skb_header_pointer(skb, *p_nhoff + offset,
+ sizeof(_eth),
+ data, *p_hlen, &_eth);
+ if (!eth)
+ return FLOW_DISSECT_RET_OUT_BAD;
+ *p_proto = eth->h_proto;
+ offset += sizeof(*eth);
+
+ /* Cap headers that we access via pointers at the
+ * end of the Ethernet header as our maximum alignment
+ * at that point is only 2 bytes.
+ */
+ if (NET_IP_ALIGN)
+ *p_hlen = *p_nhoff + offset;
+ }
+ } else { /* version 1, must be PPTP */
+ u8 _ppp_hdr[PPP_HDRLEN];
+ u8 *ppp_hdr;
+
+ if (hdr->flags & GRE_ACK)
+ offset += sizeof(((struct pptp_gre_header *) 0)->ack);
+
+ ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
+ sizeof(_ppp_hdr),
+ data, *p_hlen, _ppp_hdr);
+ if (!ppp_hdr)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ switch (PPP_PROTOCOL(ppp_hdr)) {
+ case PPP_IP:
+ *p_proto = htons(ETH_P_IP);
+ break;
+ case PPP_IPV6:
+ *p_proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ /* Could probably catch some more like MPLS */
+ break;
+ }
+
+ offset += PPP_HDRLEN;
+ }
+
+ *p_nhoff += offset;
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+ return FLOW_DISSECT_RET_OUT_GOOD;
+
+ return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
+}
+
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -138,12 +348,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
struct flow_dissector_key_addrs *key_addrs;
- struct flow_dissector_key_arp *key_arp;
struct flow_dissector_key_ports *key_ports;
struct flow_dissector_key_icmp *key_icmp;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
- struct flow_dissector_key_keyid *key_keyid;
bool skip_vlan = false;
u8 ip_proto = 0;
bool ret;
@@ -181,7 +389,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
}
-again:
+proto_again:
switch (proto) {
case htons(ETH_P_IP): {
const struct iphdr *iph;
@@ -284,7 +492,7 @@ ipv6:
proto = vlan->h_vlan_encapsulated_proto;
nhoff += sizeof(*vlan);
if (skip_vlan)
- goto again;
+ goto proto_again;
}
skip_vlan = true;
@@ -307,7 +515,7 @@ ipv6:
}
}
- goto again;
+ goto proto_again;
}
case htons(ETH_P_PPP_SES): {
struct {
@@ -349,31 +557,17 @@ ipv6:
}
case htons(ETH_P_MPLS_UC):
- case htons(ETH_P_MPLS_MC): {
- struct mpls_label *hdr, _hdr[2];
+ case htons(ETH_P_MPLS_MC):
mpls:
- hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
- hlen, &_hdr);
- if (!hdr)
- goto out_bad;
-
- if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
- MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
- key_keyid = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
- target_container);
- key_keyid->keyid = hdr[1].entry &
- htonl(MPLS_LS_LABEL_MASK);
- }
-
+ switch (__skb_flow_dissect_mpls(skb, flow_dissector,
+ target_container, data,
+ nhoff, hlen)) {
+ case FLOW_DISSECT_RET_OUT_GOOD:
goto out_good;
+ case FLOW_DISSECT_RET_OUT_BAD:
+ default:
+ goto out_bad;
}
-
- goto out_good;
- }
-
case htons(ETH_P_FCOE):
if ((hlen - nhoff) < FCOE_HEADER_LEN)
goto out_bad;
@@ -382,177 +576,33 @@ mpls:
goto out_good;
case htons(ETH_P_ARP):
- case htons(ETH_P_RARP): {
- struct {
- unsigned char ar_sha[ETH_ALEN];
- unsigned char ar_sip[4];
- unsigned char ar_tha[ETH_ALEN];
- unsigned char ar_tip[4];
- } *arp_eth, _arp_eth;
- const struct arphdr *arp;
- struct arphdr *_arp;
-
- arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
- hlen, &_arp);
- if (!arp)
- goto out_bad;
-
- if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
- arp->ar_pro != htons(ETH_P_IP) ||
- arp->ar_hln != ETH_ALEN ||
- arp->ar_pln != 4 ||
- (arp->ar_op != htons(ARPOP_REPLY) &&
- arp->ar_op != htons(ARPOP_REQUEST)))
- goto out_bad;
-
- arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
- sizeof(_arp_eth), data,
- hlen,
- &_arp_eth);
- if (!arp_eth)
+ case htons(ETH_P_RARP):
+ switch (__skb_flow_dissect_arp(skb, flow_dissector,
+ target_container, data,
+ nhoff, hlen)) {
+ case FLOW_DISSECT_RET_OUT_GOOD:
+ goto out_good;
+ case FLOW_DISSECT_RET_OUT_BAD:
+ default:
goto out_bad;
-
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_ARP)) {
-
- key_arp = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_ARP,
- target_container);
-
- memcpy(&key_arp->sip, arp_eth->ar_sip,
- sizeof(key_arp->sip));
- memcpy(&key_arp->tip, arp_eth->ar_tip,
- sizeof(key_arp->tip));
-
- /* Only store the lower byte of the opcode;
- * this covers ARPOP_REPLY and ARPOP_REQUEST.
- */
- key_arp->op = ntohs(arp->ar_op) & 0xff;
-
- ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
- ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
}
-
- goto out_good;
- }
-
default:
goto out_bad;
}
ip_proto_again:
switch (ip_proto) {
- case IPPROTO_GRE: {
- struct gre_base_hdr *hdr, _hdr;
- u16 gre_ver;
- int offset = 0;
-
- hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
- if (!hdr)
+ case IPPROTO_GRE:
+ switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
+ target_container, data,
+ &proto, &nhoff, &hlen, flags)) {
+ case FLOW_DISSECT_RET_OUT_GOOD:
+ goto out_good;
+ case FLOW_DISSECT_RET_OUT_BAD:
goto out_bad;
-
- /* Only look inside GRE without routing */
- if (hdr->flags & GRE_ROUTING)
- break;
-
- /* Only look inside GRE for version 0 and 1 */
- gre_ver = ntohs(hdr->flags & GRE_VERSION);
- if (gre_ver > 1)
- break;
-
- proto = hdr->protocol;
- if (gre_ver) {
- /* Version1 must be PPTP, and check the flags */
- if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
- break;
- }
-
- offset += sizeof(struct gre_base_hdr);
-
- if (hdr->flags & GRE_CSUM)
- offset += sizeof(((struct gre_full_hdr *)0)->csum) +
- sizeof(((struct gre_full_hdr *)0)->reserved1);
-
- if (hdr->flags & GRE_KEY) {
- const __be32 *keyid;
- __be32 _keyid;
-
- keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid),
- data, hlen, &_keyid);
- if (!keyid)
- goto out_bad;
-
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_GRE_KEYID)) {
- key_keyid = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_GRE_KEYID,
- target_container);
- if (gre_ver == 0)
- key_keyid->keyid = *keyid;
- else
- key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
- }
- offset += sizeof(((struct gre_full_hdr *)0)->key);
+ case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
+ goto proto_again;
}
-
- if (hdr->flags & GRE_SEQ)
- offset += sizeof(((struct pptp_gre_header *)0)->seq);
-
- if (gre_ver == 0) {
- if (proto == htons(ETH_P_TEB)) {
- const struct ethhdr *eth;
- struct ethhdr _eth;
-
- eth = __skb_header_pointer(skb, nhoff + offset,
- sizeof(_eth),
- data, hlen, &_eth);
- if (!eth)
- goto out_bad;
- proto = eth->h_proto;
- offset += sizeof(*eth);
-
- /* Cap headers that we access via pointers at the
- * end of the Ethernet header as our maximum alignment
- * at that point is only 2 bytes.
- */
- if (NET_IP_ALIGN)
- hlen = (nhoff + offset);
- }
- } else { /* version 1, must be PPTP */
- u8 _ppp_hdr[PPP_HDRLEN];
- u8 *ppp_hdr;
-
- if (hdr->flags & GRE_ACK)
- offset += sizeof(((struct pptp_gre_header *)0)->ack);
-
- ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
- sizeof(_ppp_hdr),
- data, hlen, _ppp_hdr);
- if (!ppp_hdr)
- goto out_bad;
-
- switch (PPP_PROTOCOL(ppp_hdr)) {
- case PPP_IP:
- proto = htons(ETH_P_IP);
- break;
- case PPP_IPV6:
- proto = htons(ETH_P_IPV6);
- break;
- default:
- /* Could probably catch some more like MPLS */
- break;
- }
-
- offset += PPP_HDRLEN;
- }
-
- nhoff += offset;
- key_control->flags |= FLOW_DIS_ENCAPSULATION;
- if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
- goto out_good;
-
- goto again;
- }
case NEXTHDR_HOP:
case NEXTHDR_ROUTING:
case NEXTHDR_DEST: {
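The net effect of this refactor is that the per-protocol parsers (MPLS, ARP, GRE) now report a tri-state verdict which the main dissection loop maps back onto its goto labels. A stand-alone, hedged sketch of that control-flow shape, using invented demo_ names rather than the kernel code:

/* Hedged sketch of the dispatch pattern: helpers return a tri-state
 * verdict, the caller keeps the goto-based flow.  Each character of
 * "pkt" pretends to be one encapsulation layer.
 */
#include <stdio.h>

enum demo_ret { DEMO_GOOD, DEMO_BAD, DEMO_PROTO_AGAIN };

static enum demo_ret demo_parse_layer(const char **pkt)
{
	switch (**pkt) {
	case 'g':		/* e.g. GRE: step past it, re-dispatch */
		(*pkt)++;
		return DEMO_PROTO_AGAIN;
	case 'i':		/* e.g. inner IP: done */
		return DEMO_GOOD;
	default:
		return DEMO_BAD;
	}
}

static int demo_dissect(const char *pkt)
{
proto_again:
	switch (demo_parse_layer(&pkt)) {
	case DEMO_GOOD:
		goto out_good;
	case DEMO_PROTO_AGAIN:
		goto proto_again;
	case DEMO_BAD:
	default:
		goto out_bad;
	}
out_good:
	return 1;
out_bad:
	return 0;
}

int main(void)
{
	printf("%d %d\n", demo_dissect("gi"), demo_dissect("gx"));
	return 0;
}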
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 6df9f8fabf0c..b5888190223c 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -162,7 +162,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
struct rtnexthop *rtnh = (struct rtnexthop *)attr;
struct nlattr *nla_entype;
struct nlattr *attrs;
- struct nlattr *nla;
u16 encap_type;
int attrlen;
@@ -170,7 +169,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
attrs = rtnh_attrs(rtnh);
- nla = nla_find(attrs, attrlen, RTA_ENCAP);
nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla_entype) {
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 758f140b6bed..fb87e78a2cc7 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -45,8 +45,8 @@ static u32 seq_scale(u32 seq)
#endif
#if IS_ENABLED(CONFIG_IPV6)
-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport, u32 *tsoff)
{
const struct {
struct in6_addr saddr;
@@ -66,7 +66,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
*tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
return seq_scale(hash);
}
-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport)
@@ -89,14 +89,13 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
#ifdef CONFIG_INET
-/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
+/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
* but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
* it would be easy enough to have the former function use siphash_4u32, passing
* the arguments as separate u32.
*/
-
-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport, u32 *tsoff)
{
u64 hash;
net_secret_init();
diff --git a/net/core/sock.c b/net/core/sock.c
index a96d5f7a5734..a83731c36761 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -247,12 +247,66 @@ static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
_sock_locks("k-clock-")
};
+static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
+ "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" ,
+ "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK",
+ "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" ,
+ "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" ,
+ "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" ,
+ "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" ,
+ "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" ,
+ "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" ,
+ "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" ,
+ "rlock-27" , "rlock-28" , "rlock-AF_CAN" ,
+ "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" ,
+ "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" ,
+ "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" ,
+ "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" ,
+ "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_MAX"
+};
+static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
+ "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" ,
+ "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK",
+ "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" ,
+ "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" ,
+ "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" ,
+ "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" ,
+ "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" ,
+ "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" ,
+ "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" ,
+ "wlock-27" , "wlock-28" , "wlock-AF_CAN" ,
+ "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" ,
+ "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" ,
+ "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" ,
+ "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" ,
+ "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_MAX"
+};
+static const char *const af_family_elock_key_strings[AF_MAX+1] = {
+ "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" ,
+ "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK",
+ "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" ,
+ "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" ,
+ "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" ,
+ "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" ,
+ "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" ,
+ "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" ,
+ "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" ,
+ "elock-27" , "elock-28" , "elock-AF_CAN" ,
+ "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" ,
+ "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" ,
+ "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" ,
+ "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" ,
+ "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_MAX"
+};
/*
- * sk_callback_lock locking rules are per-address-family,
+ * sk_callback_lock and sk queues locking rules are per-address-family,
* so split the lock classes by using a per-AF key:
*/
static struct lock_class_key af_callback_keys[AF_MAX];
+static struct lock_class_key af_rlock_keys[AF_MAX];
+static struct lock_class_key af_wlock_keys[AF_MAX];
+static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];
/* Take into consideration the size of the struct sk_buff overhead in the
@@ -1478,6 +1532,27 @@ void sk_free(struct sock *sk)
}
EXPORT_SYMBOL(sk_free);
+static void sk_init_common(struct sock *sk)
+{
+ skb_queue_head_init(&sk->sk_receive_queue);
+ skb_queue_head_init(&sk->sk_write_queue);
+ skb_queue_head_init(&sk->sk_error_queue);
+
+ rwlock_init(&sk->sk_callback_lock);
+ lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
+ af_rlock_keys + sk->sk_family,
+ af_family_rlock_key_strings[sk->sk_family]);
+ lockdep_set_class_and_name(&sk->sk_write_queue.lock,
+ af_wlock_keys + sk->sk_family,
+ af_family_wlock_key_strings[sk->sk_family]);
+ lockdep_set_class_and_name(&sk->sk_error_queue.lock,
+ af_elock_keys + sk->sk_family,
+ af_family_elock_key_strings[sk->sk_family]);
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
+ af_callback_keys + sk->sk_family,
+ af_family_clock_key_strings[sk->sk_family]);
+}
+
/**
* sk_clone_lock - clone a socket, and lock its clone
* @sk: the socket to clone
@@ -1511,13 +1586,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
atomic_set(&newsk->sk_wmem_alloc, 1);
atomic_set(&newsk->sk_omem_alloc, 0);
- skb_queue_head_init(&newsk->sk_receive_queue);
- skb_queue_head_init(&newsk->sk_write_queue);
-
- rwlock_init(&newsk->sk_callback_lock);
- lockdep_set_class_and_name(&newsk->sk_callback_lock,
- af_callback_keys + newsk->sk_family,
- af_family_clock_key_strings[newsk->sk_family]);
+ sk_init_common(newsk);
newsk->sk_dst_cache = NULL;
newsk->sk_dst_pending_confirm = 0;
@@ -1528,7 +1597,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
sock_reset_flag(newsk, SOCK_DONE);
- skb_queue_head_init(&newsk->sk_error_queue);
filter = rcu_dereference_protected(newsk->sk_filter, 1);
if (filter != NULL)
@@ -2455,10 +2523,7 @@ EXPORT_SYMBOL(sk_stop_timer);
void sock_init_data(struct socket *sock, struct sock *sk)
{
- skb_queue_head_init(&sk->sk_receive_queue);
- skb_queue_head_init(&sk->sk_write_queue);
- skb_queue_head_init(&sk->sk_error_queue);
-
+ sk_init_common(sk);
sk->sk_send_head = NULL;
init_timer(&sk->sk_timer);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 7de5b40a5d0d..9afa2a5030b2 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -132,6 +132,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/fib_rules.h>
+#include <net/tcp.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
@@ -1469,18 +1470,18 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
case DSO_NODELAY:
if (optlen != sizeof(int))
return -EINVAL;
- if (scp->nonagle == 2)
+ if (scp->nonagle == TCP_NAGLE_CORK)
return -EINVAL;
- scp->nonagle = (u.val == 0) ? 0 : 1;
+ scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
/* if (scp->nonagle == 1) { Push pending frames } */
break;
case DSO_CORK:
if (optlen != sizeof(int))
return -EINVAL;
- if (scp->nonagle == 1)
+ if (scp->nonagle == TCP_NAGLE_OFF)
return -EINVAL;
- scp->nonagle = (u.val == 0) ? 0 : 2;
+ scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
/* if (scp->nonagle == 0) { Push pending frames } */
break;
@@ -1608,14 +1609,14 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
case DSO_NODELAY:
if (r_len > sizeof(int))
r_len = sizeof(int);
- val = (scp->nonagle == 1);
+ val = (scp->nonagle == TCP_NAGLE_OFF);
r_data = &val;
break;
case DSO_CORK:
if (r_len > sizeof(int))
r_len = sizeof(int);
- val = (scp->nonagle == 2);
+ val = (scp->nonagle == TCP_NAGLE_CORK);
r_data = &val;
break;
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index c6d4238ff94a..f83de23a30e7 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \
tcp_rate.o tcp_recovery.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
- fib_frontend.o fib_semantics.o fib_trie.o \
+ fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o
obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebedd545e5e..927f1d4b8c80 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1192,6 +1192,18 @@ out:
return done;
}
+static __be32 in_dev_select_addr(const struct in_device *in_dev,
+ int scope)
+{
+ for_primary_ifa(in_dev) {
+ if (ifa->ifa_scope != RT_SCOPE_LINK &&
+ ifa->ifa_scope <= scope)
+ return ifa->ifa_local;
+ } endfor_ifa(in_dev);
+
+ return 0;
+}
+
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
{
__be32 addr = 0;
@@ -1228,13 +1240,9 @@ no_in_dev:
if (master_idx &&
(dev = dev_get_by_index_rcu(net, master_idx)) &&
(in_dev = __in_dev_get_rcu(dev))) {
- for_primary_ifa(in_dev) {
- if (ifa->ifa_scope != RT_SCOPE_LINK &&
- ifa->ifa_scope <= scope) {
- addr = ifa->ifa_local;
- goto out_unlock;
- }
- } endfor_ifa(in_dev);
+ addr = in_dev_select_addr(in_dev, scope);
+ if (addr)
+ goto out_unlock;
}
/* Not loopback addresses on loopback should be preferred
@@ -1249,13 +1257,9 @@ no_in_dev:
if (!in_dev)
continue;
- for_primary_ifa(in_dev) {
- if (ifa->ifa_scope != RT_SCOPE_LINK &&
- ifa->ifa_scope <= scope) {
- addr = ifa->ifa_local;
- goto out_unlock;
- }
- } endfor_ifa(in_dev);
+ addr = in_dev_select_addr(in_dev, scope);
+ if (addr)
+ goto out_unlock;
}
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c
new file mode 100644
index 000000000000..e0714d975947
--- /dev/null
+++ b/net/ipv4/fib_notifier.c
@@ -0,0 +1,86 @@
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/kernel.h>
+#include <net/net_namespace.h>
+#include <net/netns/ipv4.h>
+#include <net/ip_fib.h>
+
+static ATOMIC_NOTIFIER_HEAD(fib_chain);
+
+int call_fib_notifier(struct notifier_block *nb, struct net *net,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info)
+{
+ info->net = net;
+ return nb->notifier_call(nb, event_type, info);
+}
+
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info)
+{
+ net->ipv4.fib_seq++;
+ info->net = net;
+ return atomic_notifier_call_chain(&fib_chain, event_type, info);
+}
+
+static unsigned int fib_seq_sum(void)
+{
+ unsigned int fib_seq = 0;
+ struct net *net;
+
+ rtnl_lock();
+ for_each_net(net)
+ fib_seq += net->ipv4.fib_seq;
+ rtnl_unlock();
+
+ return fib_seq;
+}
+
+static bool fib_dump_is_consistent(struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb),
+ unsigned int fib_seq)
+{
+ atomic_notifier_chain_register(&fib_chain, nb);
+ if (fib_seq == fib_seq_sum())
+ return true;
+ atomic_notifier_chain_unregister(&fib_chain, nb);
+ if (cb)
+ cb(nb);
+ return false;
+}
+
+#define FIB_DUMP_MAX_RETRIES 5
+int register_fib_notifier(struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb))
+{
+ int retries = 0;
+
+ do {
+ unsigned int fib_seq = fib_seq_sum();
+ struct net *net;
+
+ /* Mutex semantics guarantee that every change done to
+ * FIB tries before we read the change sequence counter
+ * is now visible to us.
+ */
+ rcu_read_lock();
+ for_each_net_rcu(net) {
+ fib_rules_notify(net, nb);
+ fib_notify(net, nb);
+ }
+ rcu_read_unlock();
+
+ if (fib_dump_is_consistent(nb, cb, fib_seq))
+ return 0;
+ } while (++retries < FIB_DUMP_MAX_RETRIES);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL(register_fib_notifier);
+
+int unregister_fib_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&fib_chain, nb);
+}
+EXPORT_SYMBOL(unregister_fib_notifier);
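A consumer of this relocated API (for example an offloading driver) registers a notifier block plus an optional callback that is invoked when the initial dump replay turns out to be inconsistent. A hedged sketch of the consumer side; every my_ name here is hypothetical:

/* Hedged sketch of a FIB notifier consumer; my_fib_event, my_fib_nb and
 * my_fib_dump_flush are invented names.  ptr points to a
 * struct fib_notifier_info embedded in the event-specific structure.
 */
#include <linux/notifier.h>
#include <net/ip_fib.h>

static int my_fib_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		/* Queue the route for hardware programming / removal. */
		break;
	case FIB_EVENT_RULE_ADD:
		/* Custom rules present: fall back to software forwarding. */
		break;
	}
	return NOTIFY_DONE;
}

static void my_fib_dump_flush(struct notifier_block *nb)
{
	/* The initial dump raced with route changes; drop any state
	 * accumulated from the partial replay.
	 */
}

static struct notifier_block my_fib_nb = {
	.notifier_call = my_fib_event,
};

/* In the driver's init path (sketch):
 *	err = register_fib_notifier(&my_fib_nb, my_fib_dump_flush);
 */

The retry loop in register_fib_notifier() above is what makes the optional callback necessary: if fib_seq_sum() changed while the dump was being replayed, the consumer must be able to throw its partial state away before the registration is retried.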
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 2e50062f642d..289210903d58 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -172,6 +172,14 @@ static int call_fib_rule_notifiers(struct net *net,
return call_fib_notifiers(net, event_type, &info);
}
+void fib_rules_notify(struct net *net, struct notifier_block *nb)
+{
+ struct fib_notifier_info info;
+
+ if (net->ipv4.fib_has_custom_rules)
+ call_fib_notifier(nb, net, FIB_EVENT_RULE_ADD, &info);
+}
+
static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
FRA_GENERIC_POLICY,
[FRA_FLOW] = { .type = NLA_U32 },
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2f0d8233950f..1201409ba1dc 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -84,43 +84,6 @@
#include <trace/events/fib.h>
#include "fib_lookup.h"
-static unsigned int fib_seq_sum(void)
-{
- unsigned int fib_seq = 0;
- struct net *net;
-
- rtnl_lock();
- for_each_net(net)
- fib_seq += net->ipv4.fib_seq;
- rtnl_unlock();
-
- return fib_seq;
-}
-
-static ATOMIC_NOTIFIER_HEAD(fib_chain);
-
-static int call_fib_notifier(struct notifier_block *nb, struct net *net,
- enum fib_event_type event_type,
- struct fib_notifier_info *info)
-{
- info->net = net;
- return nb->notifier_call(nb, event_type, info);
-}
-
-static void fib_rules_notify(struct net *net, struct notifier_block *nb,
- enum fib_event_type event_type)
-{
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- struct fib_notifier_info info;
-
- if (net->ipv4.fib_has_custom_rules)
- call_fib_notifier(nb, net, event_type, &info);
-#endif
-}
-
-static void fib_notify(struct net *net, struct notifier_block *nb,
- enum fib_event_type event_type);
-
static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
enum fib_event_type event_type, u32 dst,
int dst_len, struct fib_info *fi,
@@ -137,62 +100,6 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
return call_fib_notifier(nb, net, event_type, &info.info);
}
-static bool fib_dump_is_consistent(struct notifier_block *nb,
- void (*cb)(struct notifier_block *nb),
- unsigned int fib_seq)
-{
- atomic_notifier_chain_register(&fib_chain, nb);
- if (fib_seq == fib_seq_sum())
- return true;
- atomic_notifier_chain_unregister(&fib_chain, nb);
- if (cb)
- cb(nb);
- return false;
-}
-
-#define FIB_DUMP_MAX_RETRIES 5
-int register_fib_notifier(struct notifier_block *nb,
- void (*cb)(struct notifier_block *nb))
-{
- int retries = 0;
-
- do {
- unsigned int fib_seq = fib_seq_sum();
- struct net *net;
-
- /* Mutex semantics guarantee that every change done to
- * FIB tries before we read the change sequence counter
- * is now visible to us.
- */
- rcu_read_lock();
- for_each_net_rcu(net) {
- fib_rules_notify(net, nb, FIB_EVENT_RULE_ADD);
- fib_notify(net, nb, FIB_EVENT_ENTRY_ADD);
- }
- rcu_read_unlock();
-
- if (fib_dump_is_consistent(nb, cb, fib_seq))
- return 0;
- } while (++retries < FIB_DUMP_MAX_RETRIES);
-
- return -EBUSY;
-}
-EXPORT_SYMBOL(register_fib_notifier);
-
-int unregister_fib_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&fib_chain, nb);
-}
-EXPORT_SYMBOL(unregister_fib_notifier);
-
-int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
- struct fib_notifier_info *info)
-{
- net->ipv4.fib_seq++;
- info->net = net;
- return atomic_notifier_call_chain(&fib_chain, event_type, info);
-}
-
static int call_fib_entry_notifiers(struct net *net,
enum fib_event_type event_type, u32 dst,
int dst_len, struct fib_info *fi,
@@ -1995,8 +1902,7 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
}
static void fib_leaf_notify(struct net *net, struct key_vector *l,
- struct fib_table *tb, struct notifier_block *nb,
- enum fib_event_type event_type)
+ struct fib_table *tb, struct notifier_block *nb)
{
struct fib_alias *fa;
@@ -2012,22 +1918,21 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l,
if (tb->tb_id != fa->tb_id)
continue;
- call_fib_entry_notifier(nb, net, event_type, l->key,
+ call_fib_entry_notifier(nb, net, FIB_EVENT_ENTRY_ADD, l->key,
KEYLENGTH - fa->fa_slen, fi, fa->fa_tos,
fa->fa_type, fa->tb_id);
}
}
static void fib_table_notify(struct net *net, struct fib_table *tb,
- struct notifier_block *nb,
- enum fib_event_type event_type)
+ struct notifier_block *nb)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
t_key key = 0;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
- fib_leaf_notify(net, l, tb, nb, event_type);
+ fib_leaf_notify(net, l, tb, nb);
key = l->key + 1;
/* stop in case of wrap around */
@@ -2036,8 +1941,7 @@ static void fib_table_notify(struct net *net, struct fib_table *tb,
}
}
-static void fib_notify(struct net *net, struct notifier_block *nb,
- enum fib_event_type event_type)
+void fib_notify(struct net *net, struct notifier_block *nb)
{
unsigned int h;
@@ -2046,7 +1950,7 @@ static void fib_notify(struct net *net, struct notifier_block *nb,
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist)
- fib_table_notify(net, tb, nb, event_type);
+ fib_table_notify(net, tb, nb);
}
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39c393cc0fd3..96b67a8b18c3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6324,7 +6324,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_free;
if (isn && tmp_opt.tstamp_ok)
- af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
+ af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
if (!want_cookie && !isn) {
/* VJ's idea. We save last timestamp seen
@@ -6366,7 +6366,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_release;
}
- isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
+ isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
}
if (!dst) {
dst = af_ops->route_req(sk, &fl, req, NULL);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 575e19dcc017..08d870e45658 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,12 +94,12 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
-static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
{
- return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
- ip_hdr(skb)->saddr,
- tcp_hdr(skb)->dest,
- tcp_hdr(skb)->source, tsoff);
+ return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr,
+ ip_hdr(skb)->saddr,
+ tcp_hdr(skb)->dest,
+ tcp_hdr(skb)->source, tsoff);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -236,11 +236,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = NULL;
if (likely(!tp->repair)) {
- seq = secure_tcp_sequence_number(inet->inet_saddr,
- inet->inet_daddr,
- inet->inet_sport,
- usin->sin_port,
- &tp->tsoffset);
+ seq = secure_tcp_seq_and_tsoff(inet->inet_saddr,
+ inet->inet_daddr,
+ inet->inet_sport,
+ usin->sin_port,
+ &tp->tsoffset);
if (!tp->write_seq)
tp->write_seq = seq;
}
@@ -1253,7 +1253,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.cookie_init_seq = cookie_v4_init_sequence,
#endif
.route_req = tcp_v4_route_req,
- .init_seq = tcp_v4_init_sequence,
+ .init_seq_tsoff = tcp_v4_init_seq_and_tsoff,
.send_synack = tcp_v4_send_synack,
};
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 363172527e43..8c69768a5c46 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -245,6 +245,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
#endif
.enhanced_dad = 1,
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
+ .disable_policy = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -297,6 +298,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
#endif
.enhanced_dad = 1,
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
+ .disable_policy = 0,
};
/* Check if a valid qdisc is available */
@@ -944,6 +946,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
const struct in6_addr *peer_addr, int pfxlen,
int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
{
+ struct net *net = dev_net(idev->dev);
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
unsigned int hash;
@@ -990,6 +993,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
goto out;
}
+ if (net->ipv6.devconf_all->disable_policy ||
+ idev->cnf.disable_policy)
+ rt->dst.flags |= DST_NOPOLICY;
+
neigh_parms_data_state_setall(idev->nd_parms);
ifa->addr = *addr;
@@ -5003,6 +5010,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
#endif
array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
+ array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
}
static inline size_t inet6_ifla6_size(void)
@@ -5827,6 +5835,105 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
return ret;
}
+static
+void addrconf_set_nopolicy(struct rt6_info *rt, int action)
+{
+ if (rt) {
+ if (action)
+ rt->dst.flags |= DST_NOPOLICY;
+ else
+ rt->dst.flags &= ~DST_NOPOLICY;
+ }
+}
+
+static
+void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
+{
+ struct inet6_ifaddr *ifa;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ spin_lock(&ifa->lock);
+ if (ifa->rt) {
+ struct rt6_info *rt = ifa->rt;
+ struct fib6_table *table = rt->rt6i_table;
+ int cpu;
+
+ read_lock(&table->tb6_lock);
+ addrconf_set_nopolicy(ifa->rt, val);
+ if (rt->rt6i_pcpu) {
+ for_each_possible_cpu(cpu) {
+ struct rt6_info **rtp;
+
+ rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
+ addrconf_set_nopolicy(*rtp, val);
+ }
+ }
+ read_unlock(&table->tb6_lock);
+ }
+ spin_unlock(&ifa->lock);
+ }
+ read_unlock_bh(&idev->lock);
+}
+
+static
+int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
+{
+ struct inet6_dev *idev;
+ struct net *net;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ *valp = val;
+
+ net = (struct net *)ctl->extra2;
+ if (valp == &net->ipv6.devconf_dflt->disable_policy) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ if (valp == &net->ipv6.devconf_all->disable_policy) {
+ struct net_device *dev;
+
+ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev)
+ addrconf_disable_policy_idev(idev, val);
+ }
+ } else {
+ idev = (struct inet6_dev *)ctl->extra1;
+ addrconf_disable_policy_idev(idev, val);
+ }
+
+ rtnl_unlock();
+ return 0;
+}
+
+static
+int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int *valp = ctl->data;
+ int val = *valp;
+ loff_t pos = *ppos;
+ struct ctl_table lctl;
+ int ret;
+
+ lctl = *ctl;
+ lctl.data = &val;
+ ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
+
+ if (write && (*valp != val))
+ ret = addrconf_disable_policy(ctl, valp, val);
+
+ if (ret)
+ *ppos = pos;
+
+ return ret;
+}
+
static int minus_one = -1;
static const int one = 1;
static const int two_five_five = 255;
@@ -6185,6 +6292,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = addrconf_sysctl_addr_gen_mode,
},
{
+ .procname = "disable_policy",
+ .data = &ipv6_devconf.disable_policy,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_disable_policy,
+ },
+ {
/* sentinel */
}
};
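Like the other per-device knobs, the new toggle surfaces as /proc/sys/net/ipv6/conf/<dev>/disable_policy, plus the usual all and default entries. A minimal, hedged userspace sketch that flips the all entry; path and value semantics follow the table entry added above, error handling kept deliberately thin:

/* Hedged sketch: write the "all" sysctl so new addresses get
 * DST_NOPOLICY and existing address routes are updated via the
 * handler added in this patch.
 */
#include <stdio.h>

static int ipv6_disable_policy_all(int val)
{
	FILE *f = fopen("/proc/sys/net/ipv6/conf/all/disable_policy", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}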
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 49fa2e8c3fa9..c73a431fd06f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,12 +101,12 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
}
}
-static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
{
- return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
- ipv6_hdr(skb)->saddr.s6_addr32,
- tcp_hdr(skb)->dest,
- tcp_hdr(skb)->source, tsoff);
+ return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
+ ipv6_hdr(skb)->saddr.s6_addr32,
+ tcp_hdr(skb)->dest,
+ tcp_hdr(skb)->source, tsoff);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -287,11 +287,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk_set_txhash(sk);
if (likely(!tp->repair)) {
- seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
- sk->sk_v6_daddr.s6_addr32,
- inet->inet_sport,
- inet->inet_dport,
- &tp->tsoffset);
+ seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
+ inet->inet_sport,
+ inet->inet_dport,
+ &tp->tsoffset);
if (!tp->write_seq)
tp->write_seq = seq;
}
@@ -757,7 +757,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.cookie_init_seq = cookie_v6_init_sequence,
#endif
.route_req = tcp_v6_route_req,
- .init_seq = tcp_v6_init_sequence,
+ .init_seq_tsoff = tcp_v6_init_seq_and_tsoff,
.send_synack = tcp_v6_send_synack,
};
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401e3bc6..08a188ffe070 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -864,6 +864,64 @@ discard:
return 0;
}
+static struct sock *__udp6_lib_demux_lookup(struct net *net,
+ __be16 loc_port, const struct in6_addr *loc_addr,
+ __be16 rmt_port, const struct in6_addr *rmt_addr,
+ int dif)
+{
+ struct sock *sk;
+
+ rcu_read_lock();
+ sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
+ dif, &udp_table, NULL);
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+ rcu_read_unlock();
+
+ return sk;
+}
+
+static void udp_v6_early_demux(struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ const struct udphdr *uh;
+ struct sock *sk;
+ struct dst_entry *dst;
+ int dif = skb->dev->ifindex;
+
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr)))
+ return;
+
+ uh = udp_hdr(skb);
+
+ if (skb->pkt_type == PACKET_HOST)
+ sk = __udp6_lib_demux_lookup(net, uh->dest,
+ &ipv6_hdr(skb)->daddr,
+ uh->source, &ipv6_hdr(skb)->saddr,
+ dif);
+ else
+ return;
+
+ if (!sk)
+ return;
+
+ skb->sk = sk;
+ skb->destructor = sock_efree;
+ dst = READ_ONCE(sk->sk_rx_dst);
+
+ if (dst)
+ dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+ if (dst) {
+ if (dst->flags & DST_NOCACHE) {
+ if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+ skb_dst_set(skb, dst);
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
+ }
+}
+
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
@@ -1379,6 +1437,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
#endif
static const struct inet6_protocol udpv6_protocol = {
+ .early_demux = udp_v6_early_demux,
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 33211f9a2656..f7a08e5f9763 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -32,7 +32,9 @@
#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
static int zero = 0;
+static int one = 1;
static int label_limit = (1 << 20) - 1;
+static int ttl_max = 255;
static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
struct nlmsghdr *nlh, struct net *net, u32 portid,
@@ -220,8 +222,8 @@ out:
return &rt->rt_nh[nh_index];
}
-static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
- struct mpls_entry_decoded dec)
+static bool mpls_egress(struct net *net, struct mpls_route *rt,
+ struct sk_buff *skb, struct mpls_entry_decoded dec)
{
enum mpls_payload_type payload_type;
bool success = false;
@@ -246,22 +248,46 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
switch (payload_type) {
case MPT_IPV4: {
struct iphdr *hdr4 = ip_hdr(skb);
+ u8 new_ttl;
skb->protocol = htons(ETH_P_IP);
+
+ /* If propagating TTL, take the decremented TTL from
+ * the incoming MPLS header, otherwise decrement the
+ * TTL, but only if not 0 to avoid underflow.
+ */
+ if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
+ (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+ net->mpls.ip_ttl_propagate))
+ new_ttl = dec.ttl;
+ else
+ new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;
+
csum_replace2(&hdr4->check,
htons(hdr4->ttl << 8),
- htons(dec.ttl << 8));
- hdr4->ttl = dec.ttl;
+ htons(new_ttl << 8));
+ hdr4->ttl = new_ttl;
success = true;
break;
}
case MPT_IPV6: {
struct ipv6hdr *hdr6 = ipv6_hdr(skb);
skb->protocol = htons(ETH_P_IPV6);
- hdr6->hop_limit = dec.ttl;
+
+ /* If propagating TTL, take the decremented TTL from
+ * the incoming MPLS header, otherwise decrement the
+ * hop limit, but only if not 0 to avoid underflow.
+ */
+ if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
+ (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+ net->mpls.ip_ttl_propagate))
+ hdr6->hop_limit = dec.ttl;
+ else if (hdr6->hop_limit)
+ hdr6->hop_limit = hdr6->hop_limit - 1;
success = true;
break;
}
case MPT_UNSPEC:
+ /* Should have decided which protocol it is by now */
break;
}
@@ -361,7 +387,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
if (unlikely(!new_header_size && dec.bos)) {
/* Penultimate hop popping */
- if (!mpls_egress(rt, skb, dec))
+ if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
goto err;
} else {
bool bos;
@@ -412,6 +438,7 @@ static struct packet_type mpls_packet_type __read_mostly = {
static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
[RTA_DST] = { .type = NLA_U32 },
[RTA_OIF] = { .type = NLA_U32 },
+ [RTA_TTL_PROPAGATE] = { .type = NLA_U8 },
};
struct mpls_route_config {
@@ -421,6 +448,7 @@ struct mpls_route_config {
u8 rc_via_alen;
u8 rc_via[MAX_VIA_ALEN];
u32 rc_label;
+ u8 rc_ttl_propagate;
u8 rc_output_labels;
u32 rc_output_label[MAX_NEW_LABELS];
u32 rc_nlflags;
@@ -856,6 +884,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
rt->rt_protocol = cfg->rc_protocol;
rt->rt_payload_type = cfg->rc_payload_type;
+ rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
if (cfg->rc_mp)
err = mpls_nh_build_multi(cfg, rt);
@@ -1577,6 +1606,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->rc_label = LABEL_NOT_SPECIFIED;
cfg->rc_protocol = rtm->rtm_protocol;
cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
+ cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
cfg->rc_nlflags = nlh->nlmsg_flags;
cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->rc_nlinfo.nlh = nlh;
@@ -1623,6 +1653,17 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->rc_mp_len = nla_len(nla);
break;
}
+ case RTA_TTL_PROPAGATE:
+ {
+ u8 ttl_propagate = nla_get_u8(nla);
+
+ if (ttl_propagate > 1)
+ goto errout;
+ cfg->rc_ttl_propagate = ttl_propagate ?
+ MPLS_TTL_PROP_ENABLED :
+ MPLS_TTL_PROP_DISABLED;
+ break;
+ }
default:
/* Unsupported attribute */
goto errout;
@@ -1683,6 +1724,15 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
if (nla_put_labels(skb, RTA_DST, 1, &label))
goto nla_put_failure;
+
+ if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
+ bool ttl_propagate =
+ rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
+
+ if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
+ ttl_propagate))
+ goto nla_put_failure;
+ }
if (rt->rt_nhn == 1) {
const struct mpls_nh *nh = rt->rt_nh;
@@ -1793,7 +1843,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
{
size_t payload =
NLMSG_ALIGN(sizeof(struct rtmsg))
- + nla_total_size(4); /* RTA_DST */
+ + nla_total_size(4) /* RTA_DST */
+ + nla_total_size(1); /* RTA_TTL_PROPAGATE */
if (rt->rt_nhn == 1) {
struct mpls_nh *nh = rt->rt_nh;
@@ -1877,6 +1928,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
+ rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
rt0->rt_nh->nh_via_alen = lo->addr_len;
memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
@@ -1890,6 +1942,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
+ rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
rt2->rt_nh->nh_via_alen = lo->addr_len;
memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
@@ -1971,6 +2024,9 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
return ret;
}
+#define MPLS_NS_SYSCTL_OFFSET(field) \
+ (&((struct net *)0)->field)
+
static const struct ctl_table mpls_table[] = {
{
.procname = "platform_labels",
@@ -1979,21 +2035,47 @@ static const struct ctl_table mpls_table[] = {
.mode = 0644,
.proc_handler = mpls_platform_labels,
},
+ {
+ .procname = "ip_ttl_propagate",
+ .data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "default_ttl",
+ .data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ .extra2 = &ttl_max,
+ },
{ }
};
static int mpls_net_init(struct net *net)
{
struct ctl_table *table;
+ int i;
net->mpls.platform_labels = 0;
net->mpls.platform_label = NULL;
+ net->mpls.ip_ttl_propagate = 1;
+ net->mpls.default_ttl = 255;
table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
if (table == NULL)
return -ENOMEM;
- table[0].data = net;
+ /* Table data contains only offsets relative to the base of
+ * struct net at this point, so make them absolute.
+ */
+ for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
+ table[i].data = (char *)net + (uintptr_t)table[i].data;
+
net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
if (net->mpls.ctl == NULL) {
kfree(table);
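The MPLS_NS_SYSCTL_OFFSET trick stores per-namespace field offsets in .data at build time and rebases them onto the duplicated table once the struct net pointer is known. A self-contained, hedged sketch of the same pattern in plain C, with invented my_ names and offsetof() standing in for the open-coded macro:

/* Hedged sketch of the offset-then-rebase pattern used for the
 * per-namespace MPLS sysctls.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct my_ns { int ip_ttl_propagate; int default_ttl; };
struct my_entry { const char *name; void *data; };

/* Like MPLS_NS_SYSCTL_OFFSET, spelled with offsetof() here. */
#define MY_NS_OFFSET(field) ((void *)offsetof(struct my_ns, field))

static const struct my_entry my_template[] = {
	{ "ip_ttl_propagate", MY_NS_OFFSET(ip_ttl_propagate) },
	{ "default_ttl",      MY_NS_OFFSET(default_ttl) },
};

int main(void)
{
	struct my_ns ns = { .ip_ttl_propagate = 1, .default_ttl = 255 };
	struct my_entry *table = malloc(sizeof(my_template));
	size_t i;

	if (!table)
		return 1;
	memcpy(table, my_template, sizeof(my_template));
	/* Same rebase step as mpls_net_init(): offset -> absolute pointer. */
	for (i = 0; i < sizeof(my_template) / sizeof(my_template[0]); i++)
		table[i].data = (char *)&ns + (uintptr_t)table[i].data;

	printf("%s = %d\n", table[1].name, *(int *)table[1].data);
	free(table);
	return 0;
}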
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 76360d8b9579..62928d8fabd1 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -90,6 +90,12 @@ struct mpls_nh { /* next hop label forwarding entry */
u8 nh_via_table;
};
+enum mpls_ttl_propagation {
+ MPLS_TTL_PROP_DEFAULT,
+ MPLS_TTL_PROP_ENABLED,
+ MPLS_TTL_PROP_DISABLED,
+};
+
/* The route, nexthops and vias are stored together in the same memory
* block:
*
@@ -116,6 +122,7 @@ struct mpls_route { /* next hop label forwarding entry */
u8 rt_protocol;
u8 rt_payload_type;
u8 rt_max_alen;
+ u8 rt_ttl_propagate;
unsigned int rt_nhn;
unsigned int rt_nhn_alive;
struct mpls_nh rt_nh[0];
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index e4e4424f9eb1..22f71fce0bfb 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -29,6 +29,7 @@
static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
[MPLS_IPTUNNEL_DST] = { .type = NLA_U32 },
+ [MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 },
};
static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
@@ -49,6 +50,7 @@ static int mpls_xmit(struct sk_buff *skb)
struct rtable *rt = NULL;
struct rt6_info *rt6 = NULL;
struct mpls_dev *out_mdev;
+ struct net *net;
int err = 0;
bool bos;
int i;
@@ -56,17 +58,7 @@ static int mpls_xmit(struct sk_buff *skb)
/* Find the output device */
out_dev = dst->dev;
-
- /* Obtain the ttl */
- if (dst->ops->family == AF_INET) {
- ttl = ip_hdr(skb)->ttl;
- rt = (struct rtable *)dst;
- } else if (dst->ops->family == AF_INET6) {
- ttl = ipv6_hdr(skb)->hop_limit;
- rt6 = (struct rt6_info *)dst;
- } else {
- goto drop;
- }
+ net = dev_net(out_dev);
skb_orphan(skb);
@@ -78,6 +70,38 @@ static int mpls_xmit(struct sk_buff *skb)
tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
+ /* Obtain the ttl using the following set of rules.
+ *
+ * LWT ttl propagation setting:
+ * - disabled => use default TTL value from LWT
+ * - enabled => use TTL value from IPv4/IPv6 header
+ * - default =>
+ * Global ttl propagation setting:
+ * - disabled => use default TTL value from global setting
+ * - enabled => use TTL value from IPv4/IPv6 header
+ */
+ if (dst->ops->family == AF_INET) {
+ if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
+ ttl = tun_encap_info->default_ttl;
+ else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+ !net->mpls.ip_ttl_propagate)
+ ttl = net->mpls.default_ttl;
+ else
+ ttl = ip_hdr(skb)->ttl;
+ rt = (struct rtable *)dst;
+ } else if (dst->ops->family == AF_INET6) {
+ if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
+ ttl = tun_encap_info->default_ttl;
+ else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+ !net->mpls.ip_ttl_propagate)
+ ttl = net->mpls.default_ttl;
+ else
+ ttl = ipv6_hdr(skb)->hop_limit;
+ rt6 = (struct rt6_info *)dst;
+ } else {
+ goto drop;
+ }
+
/* Verify the destination can hold the packet */
new_header_size = mpls_encap_size(tun_encap_info);
mtu = mpls_dev_mtu(out_dev);
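
The rules documented in the mpls_xmit() comment above are applied once per address family; the only per-family difference is where the header TTL comes from. A hedged sketch of how the selection could be factored into a single helper (mpls_select_ttl() is illustrative, not part of the patch):

/* Illustrative only: one possible factoring of the TTL selection rules
 * described in the comment in mpls_xmit().
 */
static u8 mpls_select_ttl(const struct mpls_iptunnel_encap *e,
			  const struct net *net, u8 hdr_ttl)
{
	if (e->ttl_propagate == MPLS_TTL_PROP_DISABLED)
		return e->default_ttl;		/* per-LWT override */
	if (e->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
	    !net->mpls.ip_ttl_propagate)
		return net->mpls.default_ttl;	/* global override */
	return hdr_ttl;				/* propagate from IP header */
}

The IPv4 branch would pass ip_hdr(skb)->ttl and the IPv6 branch ipv6_hdr(skb)->hop_limit.
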
@@ -160,6 +184,17 @@ static int mpls_build_state(struct nlattr *nla,
&tun_encap_info->labels, tun_encap_info->label);
if (ret)
goto errout;
+
+ tun_encap_info->ttl_propagate = MPLS_TTL_PROP_DEFAULT;
+
+ if (tb[MPLS_IPTUNNEL_TTL]) {
+ tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]);
+ /* TTL 0 implies propagate from IP header */
+ tun_encap_info->ttl_propagate = tun_encap_info->default_ttl ?
+ MPLS_TTL_PROP_DISABLED :
+ MPLS_TTL_PROP_ENABLED;
+ }
+
newts->type = LWTUNNEL_ENCAP_MPLS;
newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
newts->headroom = mpls_encap_size(tun_encap_info);
@@ -186,6 +221,10 @@ static int mpls_fill_encap_info(struct sk_buff *skb,
tun_encap_info->label))
goto nla_put_failure;
+ if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT &&
+ nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -195,10 +234,16 @@ nla_put_failure:
static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
{
struct mpls_iptunnel_encap *tun_encap_info;
+ int nlsize;
tun_encap_info = mpls_lwtunnel_encap(lwtstate);
- return nla_total_size(tun_encap_info->labels * 4);
+ nlsize = nla_total_size(tun_encap_info->labels * 4);
+
+ if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT)
+ nlsize += nla_total_size(1);
+
+ return nlsize;
}
static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
@@ -207,7 +252,9 @@ static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
int l;
- if (a_hdr->labels != b_hdr->labels)
+ if (a_hdr->labels != b_hdr->labels ||
+ a_hdr->ttl_propagate != b_hdr->ttl_propagate ||
+ a_hdr->default_ttl != b_hdr->default_ttl)
return 1;
for (l = 0; l < MAX_NEW_LABELS; l++)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 1c38d2c7caa8..80fb6f63e768 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -702,9 +702,8 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
event->param.conn.initiator_depth);
/* rdma_accept() calls rdma_reject() internally if it fails */
- err = rdma_accept(cm_id, &conn_param);
- if (err)
- rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
+ if (rdma_accept(cm_id, &conn_param))
+ rds_ib_conn_error(conn, "rdma_accept failed\n");
out:
if (conn)
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index 4fe8f4fec4ee..86ef907067bb 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -78,17 +78,15 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
return ibmr;
out_no_cigar:
- if (ibmr) {
- if (fmr->fmr)
- ib_dealloc_fmr(fmr->fmr);
- kfree(ibmr);
- }
+ kfree(ibmr);
atomic_dec(&pool->item_count);
+
return ERR_PTR(err);
}
-int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
- struct scatterlist *sg, unsigned int nents)
+static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
+ struct rds_ib_mr *ibmr, struct scatterlist *sg,
+ unsigned int nents)
{
struct ib_device *dev = rds_ibdev->dev;
struct rds_ib_fmr *fmr = &ibmr->u.fmr;
@@ -114,29 +112,39 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
if (dma_addr & ~PAGE_MASK) {
- if (i > 0)
+ if (i > 0) {
+ ib_dma_unmap_sg(dev, sg, nents,
+ DMA_BIDIRECTIONAL);
return -EINVAL;
- else
+ } else {
++page_cnt;
+ }
}
if ((dma_addr + dma_len) & ~PAGE_MASK) {
- if (i < sg_dma_len - 1)
+ if (i < sg_dma_len - 1) {
+ ib_dma_unmap_sg(dev, sg, nents,
+ DMA_BIDIRECTIONAL);
return -EINVAL;
- else
+ } else {
++page_cnt;
+ }
}
len += dma_len;
}
page_cnt += len >> PAGE_SHIFT;
- if (page_cnt > ibmr->pool->fmr_attr.max_pages)
+ if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
+ ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
return -EINVAL;
+ }
dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
rdsibdev_to_node(rds_ibdev));
- if (!dma_pages)
+ if (!dma_pages) {
+ ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
return -ENOMEM;
+ }
page_cnt = 0;
for (i = 0; i < sg_dma_len; ++i) {
@@ -149,8 +157,10 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
}
ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
- if (ret)
+ if (ret) {
+ ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
goto out;
+ }
/* Success - we successfully remapped the MR, so we can
* safely tear down the old mapping.
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 5d6e98a79a5e..0ea4ab017a8c 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -125,8 +125,6 @@ void rds_ib_mr_exit(void);
void __rds_ib_teardown_mr(struct rds_ib_mr *);
void rds_ib_teardown_mr(struct rds_ib_mr *);
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int);
-int rds_ib_map_fmr(struct rds_ib_device *, struct rds_ib_mr *,
- struct scatterlist *, unsigned int);
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *);
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **);
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bcf49cd22786..62567bfe52c7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -274,7 +274,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
return NULL;
}
-void qdisc_hash_add(struct Qdisc *q)
+void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
struct Qdisc *root = qdisc_dev(q)->qdisc;
@@ -282,6 +282,8 @@ void qdisc_hash_add(struct Qdisc *q)
WARN_ON_ONCE(root == &noop_qdisc);
ASSERT_RTNL();
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
+ if (invisible)
+ q->flags |= TCQ_F_INVISIBLE;
}
}
EXPORT_SYMBOL(qdisc_hash_add);
@@ -1003,7 +1005,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
goto err_out4;
}
- qdisc_hash_add(sch);
+ qdisc_hash_add(sch, false);
return sch;
}
@@ -1401,9 +1403,14 @@ nla_put_failure:
return -1;
}
-static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
- return (q->flags & TCQ_F_BUILTIN) ? true : false;
+ if (q->flags & TCQ_F_BUILTIN)
+ return true;
+ if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
+ return true;
+
+ return false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
@@ -1417,12 +1424,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (old && !tc_qdisc_dump_ignore(old)) {
+ if (old && !tc_qdisc_dump_ignore(old, false)) {
if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
0, RTM_DELQDISC) < 0)
goto err_out;
}
- if (new && !tc_qdisc_dump_ignore(new)) {
+ if (new && !tc_qdisc_dump_ignore(new, false)) {
if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out;
@@ -1439,7 +1446,8 @@ err_out:
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
struct netlink_callback *cb,
- int *q_idx_p, int s_q_idx, bool recur)
+ int *q_idx_p, int s_q_idx, bool recur,
+ bool dump_invisible)
{
int ret = 0, q_idx = *q_idx_p;
struct Qdisc *q;
@@ -1452,7 +1460,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
if (q_idx < s_q_idx) {
q_idx++;
} else {
- if (!tc_qdisc_dump_ignore(q) &&
+ if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWQDISC) <= 0)
@@ -1474,7 +1482,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
q_idx++;
continue;
}
- if (!tc_qdisc_dump_ignore(q) &&
+ if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWQDISC) <= 0)
@@ -1496,12 +1504,21 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
int idx, q_idx;
int s_idx, s_q_idx;
struct net_device *dev;
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct tcmsg *tcm = nlmsg_data(nlh);
+ struct nlattr *tca[TCA_MAX + 1];
+ int err;
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
idx = 0;
ASSERT_RTNL();
+
+ err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL);
+ if (err < 0)
+ return err;
+
for_each_netdev(net, dev) {
struct netdev_queue *dev_queue;
@@ -1512,13 +1529,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
q_idx = 0;
if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
- true) < 0)
+ true, tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
- &q_idx, s_q_idx, false) < 0)
+ &q_idx, s_q_idx, false,
+ tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
cont:
@@ -1762,7 +1780,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
{
struct qdisc_dump_args arg;
- if (tc_qdisc_dump_ignore(q) ||
+ if (tc_qdisc_dump_ignore(q, false) ||
*t_p < s_t || !q->ops->cl_ops ||
(tcm->tcm_parent &&
TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d6ca18dc04c3..cf93e5ff3d63 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1161,6 +1161,8 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
sch->handle);
if (!q->link.q)
q->link.q = &noop_qdisc;
+ else
+ qdisc_hash_add(q->link.q, true);
q->link.priority = TC_CBQ_MAXPRIO - 1;
q->link.priority2 = TC_CBQ_MAXPRIO - 1;
@@ -1600,6 +1602,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
if (!cl->q)
cl->q = &noop_qdisc;
+ else
+ qdisc_hash_add(cl->q, true);
+
cl->common.classid = classid;
cl->tparent = parent;
cl->qdisc = sch;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index bb4cbdf75004..9fe67e257dfa 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -117,6 +117,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
&pfifo_qdisc_ops, classid);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
+ else
+ qdisc_hash_add(cl->qdisc, true);
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 802ac7c2e5e8..1b98cb2160ff 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -368,6 +368,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
if (p->q == NULL)
p->q = &noop_qdisc;
+ else
+ qdisc_hash_add(p->q, true);
pr_debug("%s: qdisc %p\n", __func__, p->q);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b052b27a984e..3e64d23e098c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -795,7 +795,7 @@ static void attach_default_qdiscs(struct net_device *dev)
}
#ifdef CONFIG_NET_SCHED
if (dev->qdisc)
- qdisc_hash_add(dev->qdisc);
+ qdisc_hash_add(dev->qdisc, false);
#endif
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3ffaa6fb0990..0198c6cdda49 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1066,6 +1066,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
&pfifo_qdisc_ops, classid);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
+ else
+ qdisc_hash_add(cl->qdisc, true);
INIT_LIST_HEAD(&cl->children);
cl->vt_tree = RB_ROOT;
cl->cf_tree = RB_ROOT;
@@ -1425,6 +1427,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
sch->handle);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
+ else
+ qdisc_hash_add(q->root.qdisc, true);
INIT_LIST_HEAD(&q->root.children);
q->root.vt_tree = RB_ROOT;
q->root.cf_tree = RB_ROOT;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 4cd5fb134bc9..95867033542e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1460,6 +1460,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_class_hash_insert(&q->clhash, &cl->common);
if (parent)
parent->children++;
+ if (cl->un.leaf.q != &noop_qdisc)
+ qdisc_hash_add(cl->un.leaf.q, true);
} else {
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 20b7f1646f69..cadfdd4f1e52 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -84,7 +84,7 @@ static void mq_attach(struct Qdisc *sch)
qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
if (ntx < dev->real_num_tx_queues)
- qdisc_hash_add(qdisc);
+ qdisc_hash_add(qdisc, false);
#endif
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 922683418e53..b851e209da4d 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -175,7 +175,7 @@ static void mqprio_attach(struct Qdisc *sch)
if (old)
qdisc_destroy(old);
if (ntx < dev->real_num_tx_queues)
- qdisc_hash_add(qdisc);
+ qdisc_hash_add(qdisc, false);
}
kfree(priv->qdiscs);
priv->qdiscs = NULL;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index e7839a0d0eaa..43a3a10b3c81 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -217,6 +217,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
old = q->queues[i];
q->queues[i] = child;
+ if (child != &noop_qdisc)
+ qdisc_hash_add(child, true);
if (old != &noop_qdisc) {
qdisc_tree_reduce_backlog(old,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index d4d7db267b6e..92c2e6d448d7 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -192,8 +192,11 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
qdisc_destroy(child);
}
- for (i = oldbands; i < q->bands; i++)
+ for (i = oldbands; i < q->bands; i++) {
q->queues[i] = queues[i];
+ if (q->queues[i] != &noop_qdisc)
+ qdisc_hash_add(q->queues[i], true);
+ }
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f9e712ce2d15..6c85f3e9239b 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -494,6 +494,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
goto destroy_class;
}
+ if (cl->qdisc != &noop_qdisc)
+ qdisc_hash_add(cl->qdisc, true);
sch_tree_lock(sch);
qdisc_class_hash_insert(&q->clhash, &cl->common);
sch_tree_unlock(sch);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 249b2a18acbd..799ea6dd69b2 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -191,6 +191,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return PTR_ERR(child);
}
+ if (child != &noop_qdisc)
+ qdisc_hash_add(child, true);
sch_tree_lock(sch);
q->flags = ctl->flags;
q->limit = ctl->limit;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index fe6963d21519..ae862f172c94 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -513,6 +513,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
if (IS_ERR(child))
return PTR_ERR(child);
+ if (child != &noop_qdisc)
+ qdisc_hash_add(child, true);
sch_tree_lock(sch);
qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 303355c449ab..9850126129a3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -396,6 +396,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
+ if (child != &noop_qdisc)
+ qdisc_hash_add(child, true);
}
q->limit = qopt->limit;
if (tb[TCA_TBF_PBURST])
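
All of the qdisc hunks above follow the same pattern: implicitly created child qdiscs are now hashed with invisible=true, so they carry TCQ_F_INVISIBLE and are skipped by a normal dump, and a dump request must carry TCA_DUMP_INVISIBLE to see them. A hedged userspace sketch using libnl-3 (assumes uapi headers new enough to define TCA_DUMP_INVISIBLE; replies arrive as RTM_NEWQDISC multipart messages, parsing omitted):

/* Sketch: request a qdisc dump that includes TCQ_F_INVISIBLE qdiscs.
 * Assumes libnl-3 and kernel headers providing TCA_DUMP_INVISIBLE.
 */
#include <sys/socket.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/rtnetlink.h>

int dump_all_qdiscs(void)
{
	struct tcmsg tcm = { .tcm_family = AF_UNSPEC };
	struct nl_sock *sk;
	struct nl_msg *msg;
	int err;

	sk = nl_socket_alloc();
	if (!sk || nl_connect(sk, NETLINK_ROUTE) < 0)
		return -1;

	msg = nlmsg_alloc_simple(RTM_GETQDISC, NLM_F_REQUEST | NLM_F_DUMP);
	nlmsg_append(msg, &tcm, sizeof(tcm), NLMSG_ALIGNTO);
	nla_put_flag(msg, TCA_DUMP_INVISIBLE);	/* ask for hidden qdiscs too */

	err = nl_send_auto(sk, msg);	/* replies: RTM_NEWQDISC multipart */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? -1 : 0;
}
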
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e03bb1aab4d0..ab1374fa5ab0 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3872,9 +3872,18 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net,
else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
reply = sctp_process_strreset_inreq(
(struct sctp_association *)asoc, param, &ev);
- /* More handles for other types will be added here, by now it
- * just ignores other types.
- */
+ else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
+ reply = sctp_process_strreset_tsnreq(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
+ reply = sctp_process_strreset_addstrm_out(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
+ reply = sctp_process_strreset_addstrm_in(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
+ reply = sctp_process_strreset_resp(
+ (struct sctp_association *)asoc, param, &ev);
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0f378ea2ae38..72cc3ecf6516 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3758,6 +3758,39 @@ out:
return retval;
}
+static int sctp_setsockopt_reconfig_supported(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(params))
+ goto out;
+
+ if (copy_from_user(&params, optval, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (asoc) {
+ asoc->reconf_enable = !!params.assoc_value;
+ } else if (!params.assoc_id) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ sp->ep->reconf_enable = !!params.assoc_value;
+ } else {
+ goto out;
+ }
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
static int sctp_setsockopt_enable_strreset(struct sock *sk,
char __user *optval,
unsigned int optlen)
@@ -4038,6 +4071,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_DEFAULT_PRINFO:
retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
break;
+ case SCTP_RECONFIG_SUPPORTED:
+ retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
+ break;
case SCTP_ENABLE_STREAM_RESET:
retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
break;
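
From userspace the new option behaves like the other SCTP feature toggles: struct sctp_assoc_value with assoc_id 0 sets the endpoint default, a non-zero assoc_id targets one association. A hedged sketch, assuming libc/kernel headers that already define SCTP_RECONFIG_SUPPORTED:

/* Sketch: toggle RE-CONFIG support from userspace. Assumes headers that
 * already carry SCTP_RECONFIG_SUPPORTED (added alongside this series).
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int set_reconf(int fd, int on)
{
	struct sctp_assoc_value av = {
		.assoc_id = 0,		/* 0: endpoint default */
		.assoc_value = on ? 1 : 0,
	};

	return setsockopt(fd, IPPROTO_SCTP, SCTP_RECONFIG_SUPPORTED,
			  &av, sizeof(av));
}

getsockopt() with the same option and structure reads the setting back, mirroring sctp_getsockopt_reconfig_supported() below.
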
@@ -6540,6 +6576,47 @@ out:
return retval;
}
+static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (asoc) {
+ params.assoc_value = asoc->reconf_enable;
+ } else if (!params.assoc_id) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ params.assoc_value = sp->ep->reconf_enable;
+ } else {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
@@ -6748,6 +6825,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
optlen);
break;
+ case SCTP_RECONFIG_SUPPORTED:
+ retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
+ optlen);
+ break;
case SCTP_ENABLE_STREAM_RESET:
retval = sctp_getsockopt_enable_strreset(sk, len, optval,
optlen);
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1c6cc04fa3a4..961d0a1e99d1 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -267,18 +267,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
stream->out = streamout;
}
- if (in) {
- struct sctp_stream_in *streamin;
-
- streamin = krealloc(stream->in, incnt * sizeof(*streamin),
- GFP_KERNEL);
- if (!streamin)
- goto out;
-
- memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
- stream->in = streamin;
- }
-
chunk = sctp_make_strreset_addstrm(asoc, out, in);
if (!chunk)
goto out;
@@ -303,13 +291,14 @@ out:
}
static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param(
- struct sctp_association *asoc, __u32 resp_seq)
+ struct sctp_association *asoc, __u32 resp_seq,
+ __be16 type)
{
struct sctp_chunk *chunk = asoc->strreset_chunk;
struct sctp_reconf_chunk *hdr;
union sctp_params param;
- if (ntohl(resp_seq) != asoc->strreset_outseq || !chunk)
+ if (!chunk)
return NULL;
hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
@@ -320,7 +309,8 @@ static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param(
*/
struct sctp_strreset_tsnreq *req = param.v;
- if (req->request_seq == resp_seq)
+ if ((!resp_seq || req->request_seq == resp_seq) &&
+ (!type || type == req->param_hdr.type))
return param.v;
}
@@ -361,13 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
goto out;
if (asoc->strreset_chunk) {
- sctp_paramhdr_t *param_hdr;
- struct sctp_transport *t;
-
- param_hdr = sctp_chunk_lookup_strreset_param(
- asoc, outreq->response_seq);
- if (!param_hdr || param_hdr->type !=
- SCTP_PARAM_RESET_IN_REQUEST) {
+ if (!sctp_chunk_lookup_strreset_param(
+ asoc, outreq->response_seq,
+ SCTP_PARAM_RESET_IN_REQUEST)) {
/* same process with outstanding isn't 0 */
result = SCTP_STRRESET_ERR_IN_PROGRESS;
goto out;
@@ -377,6 +363,8 @@ struct sctp_chunk *sctp_process_strreset_outreq(
asoc->strreset_outseq++;
if (!asoc->strreset_outstanding) {
+ struct sctp_transport *t;
+
t = asoc->strreset_chunk->transport;
if (del_timer(&t->reconf_timer))
sctp_transport_put(t);
@@ -477,3 +465,367 @@ out:
return chunk;
}
+
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
+ struct sctp_strreset_tsnreq *tsnreq = param.v;
+ struct sctp_stream *stream = asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ __u32 request_seq;
+ __u16 i;
+
+ request_seq = ntohl(tsnreq->request_seq);
+ if (request_seq > asoc->strreset_inseq) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto out;
+ } else if (request_seq == asoc->strreset_inseq) {
+ asoc->strreset_inseq++;
+ }
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
+ goto out;
+
+ if (asoc->strreset_outstanding) {
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ /* G3: The same processing as though a SACK chunk with no gap report
+ * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+ * received MUST be performed.
+ */
+ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
+ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+
+ /* G1: Compute an appropriate value for the Receiver's Next TSN -- the
+ * TSN that the peer should use to send the next DATA chunk. The
+ * value SHOULD be the smallest TSN not acknowledged by the
+ * receiver of the request plus 2^31.
+ */
+ init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
+ sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ init_tsn, GFP_ATOMIC);
+
+ /* G4: The same processing as though a FWD-TSN chunk (as defined in
+ * [RFC3758]) with all streams affected and a new cumulative TSN
+ * ACK of the Receiver's Next TSN minus 1 were received MUST be
+ * performed.
+ */
+ sctp_outq_free(&asoc->outqueue);
+
+ /* G2: Compute an appropriate value for the local endpoint's next TSN,
+ * i.e., the next TSN assigned by the receiver of the SSN/TSN reset
+ * chunk. The value SHOULD be the highest TSN sent by the receiver
+ * of the request plus 1.
+ */
+ next_tsn = asoc->next_tsn;
+ asoc->ctsn_ack_point = next_tsn - 1;
+ asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+ /* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
+ * incoming and outgoing streams.
+ */
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].ssn = 0;
+ for (i = 0; i < stream->incnt; i++)
+ stream->in[i].ssn = 0;
+
+ result = SCTP_STRRESET_PERFORMED;
+
+ *evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
+ next_tsn, GFP_ATOMIC);
+
+out:
+ return sctp_make_strreset_tsnresp(asoc, result, request_seq,
+ next_tsn, init_tsn);
+}
+
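
The G1 step above picks the new Receiver's Next TSN half the 32-bit sequence space away from the current cumulative TSN ACK point, which keeps the fresh window well clear of TSNs used before the reset. A small worked example of the wrapping arithmetic, illustrative only:

/* Illustrative only: the "+ 2^31" of G1 in unsigned 32-bit arithmetic. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctsn = 4294967000u;		/* example cumulative TSN ACK */
	uint32_t init_tsn = ctsn + (1u << 31);	/* wraps mod 2^32: 2147483352 */

	printf("%u\n", init_tsn);
	return 0;
}
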
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_addstrm *addstrm = param.v;
+ struct sctp_stream *stream = asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ struct sctp_stream_in *streamin;
+ __u32 request_seq, incnt;
+ __u16 in;
+
+ request_seq = ntohl(addstrm->request_seq);
+ if (request_seq > asoc->strreset_inseq) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto out;
+ } else if (request_seq == asoc->strreset_inseq) {
+ asoc->strreset_inseq++;
+ }
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
+ goto out;
+
+ if (asoc->strreset_chunk) {
+ if (!sctp_chunk_lookup_strreset_param(
+ asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
+ /* same processing as when outstanding isn't 0 */
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ asoc->strreset_outstanding--;
+ asoc->strreset_outseq++;
+
+ if (!asoc->strreset_outstanding) {
+ struct sctp_transport *t;
+
+ t = asoc->strreset_chunk->transport;
+ if (del_timer(&t->reconf_timer))
+ sctp_transport_put(t);
+
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ }
+ }
+
+ in = ntohs(addstrm->number_of_streams);
+ incnt = stream->incnt + in;
+ if (!in || incnt > SCTP_MAX_STREAM)
+ goto out;
+
+ streamin = krealloc(stream->in, incnt * sizeof(*streamin),
+ GFP_ATOMIC);
+ if (!streamin)
+ goto out;
+
+ memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
+ stream->in = streamin;
+ stream->incnt = incnt;
+
+ result = SCTP_STRRESET_PERFORMED;
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc,
+ 0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);
+
+out:
+ return sctp_make_strreset_resp(asoc, result, request_seq);
+}
+
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_addstrm *addstrm = param.v;
+ struct sctp_stream *stream = asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ struct sctp_stream_out *streamout;
+ struct sctp_chunk *chunk = NULL;
+ __u32 request_seq, outcnt;
+ __u16 out;
+
+ request_seq = ntohl(addstrm->request_seq);
+ if (request_seq > asoc->strreset_inseq) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto out;
+ } else if (request_seq == asoc->strreset_inseq) {
+ asoc->strreset_inseq++;
+ }
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
+ goto out;
+
+ if (asoc->strreset_outstanding) {
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ out = ntohs(addstrm->number_of_streams);
+ outcnt = stream->outcnt + out;
+ if (!out || outcnt > SCTP_MAX_STREAM)
+ goto out;
+
+ streamout = krealloc(stream->out, outcnt * sizeof(*streamout),
+ GFP_ATOMIC);
+ if (!streamout)
+ goto out;
+
+ memset(streamout + stream->outcnt, 0, out * sizeof(*streamout));
+ stream->out = streamout;
+
+ chunk = sctp_make_strreset_addstrm(asoc, out, 0);
+ if (!chunk)
+ goto out;
+
+ asoc->strreset_chunk = chunk;
+ asoc->strreset_outstanding = 1;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ stream->outcnt = outcnt;
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc,
+ 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
+
+out:
+ if (!chunk)
+ chunk = sctp_make_strreset_resp(asoc, result, request_seq);
+
+ return chunk;
+}
+
+struct sctp_chunk *sctp_process_strreset_resp(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_resp *resp = param.v;
+ struct sctp_stream *stream = asoc->stream;
+ struct sctp_transport *t;
+ __u16 i, nums, flags = 0;
+ sctp_paramhdr_t *req;
+ __u32 result;
+
+ req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
+ if (!req)
+ return NULL;
+
+ result = ntohl(resp->result);
+ if (result != SCTP_STRRESET_PERFORMED) {
+ /* if in progress, do nothing but retransmit */
+ if (result == SCTP_STRRESET_IN_PROGRESS)
+ return NULL;
+ else if (result == SCTP_STRRESET_DENIED)
+ flags = SCTP_STREAM_RESET_DENIED;
+ else
+ flags = SCTP_STREAM_RESET_FAILED;
+ }
+
+ if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
+ struct sctp_strreset_outreq *outreq;
+ __u16 *str_p = NULL;
+
+ outreq = (struct sctp_strreset_outreq *)req;
+ nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+
+ if (result == SCTP_STRRESET_PERFORMED) {
+ if (nums) {
+ str_p = outreq->list_of_streams;
+ for (i = 0; i < nums; i++)
+ stream->out[ntohs(str_p[i])].ssn = 0;
+ } else {
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].ssn = 0;
+ }
+
+ flags = SCTP_STREAM_RESET_OUTGOING_SSN;
+ }
+
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_OPEN;
+
+ *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
+ nums, str_p, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
+ struct sctp_strreset_inreq *inreq;
+ __u16 *str_p = NULL;
+
+ /* a PERFORMED result is not possible for an inreq */
+ if (result == SCTP_STRRESET_PERFORMED)
+ return NULL;
+
+ inreq = (struct sctp_strreset_inreq *)req;
+ nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+
+ str_p = inreq->list_of_streams;
+ *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
+ nums, str_p, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
+ struct sctp_strreset_resptsn *resptsn;
+ __u32 stsn, rtsn;
+
+ /* check for resptsn, as sctp_verify_reconf didn't do it */
+ if (ntohs(param.p->length) != sizeof(*resptsn))
+ return NULL;
+
+ resptsn = (struct sctp_strreset_resptsn *)resp;
+ stsn = ntohl(resptsn->senders_next_tsn);
+ rtsn = ntohl(resptsn->receivers_next_tsn);
+
+ if (result == SCTP_STRRESET_PERFORMED) {
+ __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
+ &asoc->peer.tsn_map);
+
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
+ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+
+ sctp_tsnmap_init(&asoc->peer.tsn_map,
+ SCTP_TSN_MAP_INITIAL,
+ stsn, GFP_ATOMIC);
+
+ sctp_outq_free(&asoc->outqueue);
+
+ asoc->next_tsn = rtsn;
+ asoc->ctsn_ack_point = asoc->next_tsn - 1;
+ asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].ssn = 0;
+ for (i = 0; i < stream->incnt; i++)
+ stream->in[i].ssn = 0;
+ }
+
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_OPEN;
+
+ *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
+ stsn, rtsn, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
+ struct sctp_strreset_addstrm *addstrm;
+ __u16 number;
+
+ addstrm = (struct sctp_strreset_addstrm *)req;
+ nums = ntohs(addstrm->number_of_streams);
+ number = stream->outcnt - nums;
+
+ if (result == SCTP_STRRESET_PERFORMED)
+ for (i = number; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_OPEN;
+ else
+ stream->outcnt = number;
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ 0, nums, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
+ struct sctp_strreset_addstrm *addstrm;
+
+ /* a PERFORMED result is not possible for an addstrm-in
+ * request
+ */
+ if (result == SCTP_STRRESET_PERFORMED)
+ return NULL;
+
+ addstrm = (struct sctp_strreset_addstrm *)req;
+ nums = ntohs(addstrm->number_of_streams);
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ nums, 0, GFP_ATOMIC);
+ }
+
+ asoc->strreset_outstanding--;
+ asoc->strreset_outseq++;
+
+ /* remove everything for this reconf request */
+ if (!asoc->strreset_outstanding) {
+ t = asoc->strreset_chunk->transport;
+ if (del_timer(&t->reconf_timer))
+ sctp_transport_put(t);
+
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ }
+
+ return NULL;
+}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index daf8554fd42a..0e732f68c2bf 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,6 +275,13 @@ static struct ctl_table sctp_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "reconf_enable",
+ .data = &init_net.sctp.reconf_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "auth_enable",
.data = &init_net.sctp.auth_enable,
.maxlen = sizeof(int),
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index c8881bc542a0..ec2b3e013c2f 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -883,6 +883,62 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
return event;
}
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+ const struct sctp_association *asoc, __u16 flags, __u32 local_tsn,
+ __u32 remote_tsn, gfp_t gfp)
+{
+ struct sctp_assoc_reset_event *areset;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_assoc_reset_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ return NULL;
+
+ skb = sctp_event2skb(event);
+ areset = (struct sctp_assoc_reset_event *)
+ skb_put(skb, sizeof(struct sctp_assoc_reset_event));
+
+ areset->assocreset_type = SCTP_ASSOC_RESET_EVENT;
+ areset->assocreset_flags = flags;
+ areset->assocreset_length = sizeof(struct sctp_assoc_reset_event);
+ sctp_ulpevent_set_owner(event, asoc);
+ areset->assocreset_assoc_id = sctp_assoc2id(asoc);
+ areset->assocreset_local_tsn = local_tsn;
+ areset->assocreset_remote_tsn = remote_tsn;
+
+ return event;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp)
+{
+ struct sctp_stream_change_event *schange;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_stream_change_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ return NULL;
+
+ skb = sctp_event2skb(event);
+ schange = (struct sctp_stream_change_event *)
+ skb_put(skb, sizeof(struct sctp_stream_change_event));
+
+ schange->strchange_type = SCTP_STREAM_CHANGE_EVENT;
+ schange->strchange_flags = flags;
+ schange->strchange_length = sizeof(struct sctp_stream_change_event);
+ sctp_ulpevent_set_owner(event, asoc);
+ schange->strchange_assoc_id = sctp_assoc2id(asoc);
+ schange->strchange_instrms = strchange_instrms;
+ schange->strchange_outstrms = strchange_outstrms;
+
+ return event;
+}
+
/* Return the notification type, assuming this is a notification
* event.
*/
diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh
index 4aa5369ffa4e..d85968cb1bf2 100755
--- a/tools/hv/bondvf.sh
+++ b/tools/hv/bondvf.sh
@@ -101,9 +101,25 @@ function create_bond_cfg_redhat {
echo BONDING_OPTS=\"mode=active-backup miimon=100 primary=$2\" >>$fn
}
+function del_eth_cfg_ubuntu {
+ local fn=$cfgdir/interfaces
+ local tmpfl=$(mktemp)
+
+ local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1
+ local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)'
+
+ awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl
+
+ cp $tmpfl $fn
+
+ rm $tmpfl
+}
+
function create_eth_cfg_ubuntu {
local fn=$cfgdir/interfaces
+ del_eth_cfg_ubuntu $1
+
echo $'\n'auto $1 >>$fn
echo iface $1 inet manual >>$fn
echo bond-master $2 >>$fn
@@ -119,6 +135,8 @@ function create_eth_cfg_pri_ubuntu {
function create_bond_cfg_ubuntu {
local fn=$cfgdir/interfaces
+ del_eth_cfg_ubuntu $1
+
echo $'\n'auto $1 >>$fn
echo iface $1 inet dhcp >>$fn
echo bond-mode active-backup >>$fn